var/home/core/zuul-output/logs/kubelet.log:
Nov 21 15:35:10 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 21 15:35:10 crc restorecon[4680]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 21 15:35:10 crc restorecon[4680]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:10 crc restorecon[4680]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 15:35:10 crc 
restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 15:35:10 crc restorecon[4680]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 15:35:10 crc restorecon[4680]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 15:35:10 crc 
restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:10 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc 
restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc 
restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 15:35:11 
crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 
15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]:
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 
15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 15:35:11 crc restorecon[4680]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 21 15:35:11 crc restorecon[4680]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 21 15:35:11 crc restorecon[4680]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Nov 21 15:35:12 crc kubenswrapper[4967]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 21 15:35:12 crc kubenswrapper[4967]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 21 15:35:12 crc kubenswrapper[4967]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 21 15:35:12 crc kubenswrapper[4967]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 21 15:35:12 crc kubenswrapper[4967]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 21 15:35:12 crc kubenswrapper[4967]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.272823    4967 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.281925    4967 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.281968    4967 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.281977    4967 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.281984    4967 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.281991    4967 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.281999    4967 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282004    4967 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282012    4967 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282019 4967 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282025 4967 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282031 4967 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282038 4967 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282043 4967 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282049 4967 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282054 4967 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282060 4967 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282065 4967 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282070 4967 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282076 4967 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282081 4967 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282086 4967 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282091 4967 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282096 4967 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282101 4967 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282106 4967 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282111 4967 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282116 4967 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282128 4967 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282134 4967 feature_gate.go:330] unrecognized feature gate: Example
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282141 4967 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282148 4967 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282153 4967 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282158 4967 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282163 4967 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282168 4967 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282173 4967 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282179 4967 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282184 4967 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282189 4967 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282194 4967 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282200 4967 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282205 4967 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282211 4967 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282216 4967 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282221 4967 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282227 4967 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282232 4967 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282237 4967 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282242 4967 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282248 4967 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282253 4967 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282258 4967 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282263 4967 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282268 4967 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282273 4967 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282278 4967 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282284 4967 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282289 4967 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282296 4967 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282303 4967 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282334 4967 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282342 4967 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282349 4967 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282356 4967 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282362 4967 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282367 4967 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282373 4967 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282378 4967 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282383 4967 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282389 4967 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.282404 4967 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282587 4967 flags.go:64] FLAG: --address="0.0.0.0"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282603 4967 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282612 4967 flags.go:64] FLAG: --anonymous-auth="true"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282620 4967 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282628 4967 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282636 4967 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282646 4967 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282653 4967 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282660 4967 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282666 4967 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282673 4967 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282681 4967 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282688 4967 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282694 4967 flags.go:64] FLAG: --cgroup-root=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282700 4967 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282706 4967 flags.go:64] FLAG: --client-ca-file=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282711 4967 flags.go:64] FLAG: --cloud-config=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282717 4967 flags.go:64] FLAG: --cloud-provider=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282723 4967 flags.go:64] FLAG: --cluster-dns="[]"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282731 4967 flags.go:64] FLAG: --cluster-domain=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282738 4967 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282745 4967 flags.go:64] FLAG: --config-dir=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282751 4967 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282758 4967 flags.go:64] FLAG: --container-log-max-files="5"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282765 4967 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282771 4967 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282778 4967 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282786 4967 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282801 4967 flags.go:64] FLAG: --contention-profiling="false"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282813 4967 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282821 4967 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282830 4967 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282836 4967 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282844 4967 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282851 4967 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282857 4967 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282863 4967 flags.go:64] FLAG: --enable-load-reader="false"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282869 4967 flags.go:64] FLAG: --enable-server="true"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282876 4967 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282884 4967 flags.go:64] FLAG: --event-burst="100"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282891 4967 flags.go:64] FLAG: --event-qps="50"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282897 4967 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282903 4967 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282909 4967 flags.go:64] FLAG: --eviction-hard=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282917 4967 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282924 4967 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282931 4967 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282939 4967 flags.go:64] FLAG: --eviction-soft=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282945 4967 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282952 4967 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282958 4967 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282964 4967 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282969 4967 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282975 4967 flags.go:64] FLAG: --fail-swap-on="true"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282982 4967 flags.go:64] FLAG: --feature-gates=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282990 4967 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.282996 4967 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283002 4967 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283008 4967 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283015 4967 flags.go:64] FLAG: --healthz-port="10248"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283021 4967 flags.go:64] FLAG: --help="false"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283027 4967 flags.go:64] FLAG: --hostname-override=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283033 4967 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283039 4967 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283046 4967 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283051 4967 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283058 4967 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283063 4967 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283069 4967 flags.go:64] FLAG: --image-service-endpoint=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283075 4967 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283081 4967 flags.go:64] FLAG: --kube-api-burst="100"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283087 4967 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283094 4967 flags.go:64] FLAG: --kube-api-qps="50"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283100 4967 flags.go:64] FLAG: --kube-reserved=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283122 4967 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283128 4967 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283134 4967 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283141 4967 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283147 4967 flags.go:64] FLAG: --lock-file=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283153 4967 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283160 4967 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283166 4967 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283184 4967 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283191 4967 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283197 4967 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283203 4967 flags.go:64] FLAG: --logging-format="text"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283210 4967 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283216 4967 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283223 4967 flags.go:64] FLAG: --manifest-url=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283229 4967 flags.go:64] FLAG: --manifest-url-header=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283237 4967 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283243 4967 flags.go:64] FLAG: --max-open-files="1000000"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283251 4967 flags.go:64] FLAG: --max-pods="110"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283259 4967 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283266 4967 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283273 4967 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283280 4967 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283287 4967 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283293 4967 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283299 4967 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283333 4967 flags.go:64] FLAG: --node-status-max-images="50"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283339 4967 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283345 4967 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283351 4967 flags.go:64] FLAG: --pod-cidr=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283357 4967 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283367 4967 flags.go:64] FLAG: --pod-manifest-path=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283374 4967 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283380 4967 flags.go:64] FLAG: --pods-per-core="0"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283386 4967 flags.go:64] FLAG: --port="10250"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283400 4967 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283407 4967 flags.go:64] FLAG: --provider-id=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283413 4967 flags.go:64] FLAG: --qos-reserved=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283419 4967 flags.go:64] FLAG: --read-only-port="10255"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283425 4967 flags.go:64] FLAG: --register-node="true"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283431 4967 flags.go:64] FLAG: --register-schedulable="true"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283437 4967 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283448 4967 flags.go:64] FLAG: --registry-burst="10"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283454 4967 flags.go:64] FLAG: --registry-qps="5"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283460 4967 flags.go:64] FLAG: --reserved-cpus=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283468 4967 flags.go:64] FLAG: --reserved-memory=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283477 4967 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283483 4967 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283490 4967 flags.go:64] FLAG: --rotate-certificates="false"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283496 4967 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283502 4967 flags.go:64] FLAG: --runonce="false"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283508 4967 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283514 4967 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283521 4967 flags.go:64] FLAG: --seccomp-default="false"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283527 4967 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283533 4967 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283539 4967 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283546 4967 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283552 4967 flags.go:64] FLAG: --storage-driver-password="root"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283558 4967 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283564 4967 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283570 4967 flags.go:64] FLAG: --storage-driver-user="root"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283576 4967 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283583 4967 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283589 4967 flags.go:64] FLAG: --system-cgroups=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283595 4967 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283605 4967 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283610 4967 flags.go:64] FLAG: --tls-cert-file=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283617 4967 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283625 4967 flags.go:64] FLAG: --tls-min-version=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283631 4967 flags.go:64] FLAG: --tls-private-key-file=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283636 4967 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283642 4967 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283649 4967 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283655 4967 flags.go:64] FLAG: --v="2"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283663 4967 flags.go:64] FLAG: --version="false"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283671 4967 flags.go:64] FLAG: --vmodule=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283685 4967 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.283692 4967 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283840 4967 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283850 4967 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283862 4967 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283870 4967 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283877 4967 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283883 4967 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283889 4967 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283894 4967 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283900 4967 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283905 4967 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283911 4967 feature_gate.go:330] unrecognized feature gate: Example
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283916 4967 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283921 4967 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283927 4967 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283933 4967 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283938 4967 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283943 4967 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283949 4967 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283954 4967 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283961 4967 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283967 4967 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283974 4967 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283981 4967 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283987 4967 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283993 4967 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.283999 4967 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284005 4967 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284010 4967 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284016 4967 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284021 4967 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284030 4967 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284037 4967 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284043 4967 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284048 4967 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284054 4967 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284059 4967 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284065 4967 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284072 4967 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284080 4967 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284086 4967 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284091 4967 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284097 4967 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284103 4967 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284108 4967 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284113 4967 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284118 4967 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284124 4967 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284129 4967 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284134 4967 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284139 4967 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284144 4967 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284149 4967 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284155 4967 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284160 4967 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284165 4967 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284170 4967 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284175 4967 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284181 4967 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284186 4967 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284193 4967 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284199 4967 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284204 4967 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284213 4967 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284219 4967 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284224 4967 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284229 4967 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284235 4967 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284240 4967 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284245 4967 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284250 4967 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.284255 4967 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.284265 4967 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.297042 4967 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.297080 4967 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297151 4967 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297161 4967 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297167 4967 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297172 4967 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297177 4967 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297182 4967 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297188 4967 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297194 4967 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297199 4967 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297204 4967 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297208 4967 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297213 4967 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297217 4967 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297222 4967 feature_gate.go:330] unrecognized feature gate: Example
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297226 4967 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297231 4967 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297235 4967 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297239 4967 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297274 4967 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297279 4967 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297282 4967 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297286 4967 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297290 4967 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297295 4967 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297298 4967 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297302 4967 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297337 4967 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297346 4967 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297351 4967 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297358 4967 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297362 4967 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297366 4967 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297369 4967 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297373 4967 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297377 4967 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297380 4967 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297383 4967 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297387 4967 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297390 4967 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297394 4967 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297397 4967 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297401 4967 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297406 4967 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297412 4967 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297417 4967 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297420 4967 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297424 4967 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297428 4967 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297432 4967 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297435 4967 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297439 4967 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297443 4967 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297447 4967 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297451 4967 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297455 4967 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297458 4967 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297462 4967 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297465 4967 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297469 4967 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297474 4967 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297478 4967 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297482 4967 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297487 4967 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297490 4967 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297494 4967 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297498 4967 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297502 4967 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297506 4967 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297510 4967 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297514 4967 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297518 4967 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.297524 4967 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297635 4967 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297642 4967 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297646 4967 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297651 4967 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297655 4967 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297659 4967 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297662 4967 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297666 4967 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297670 4967 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297673 4967 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297677 4967 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297680 4967 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297684 4967 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297688 4967 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297691 4967 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297695 4967 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297698 4967 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297701 4967 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297705 4967 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297708 4967 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297712 4967 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297715 4967 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297720 4967 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297724 4967 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297727 4967 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297731 4967 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297734 4967 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297738 4967 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297742 4967 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297746 4967 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297750 4967 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297754 4967 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297758 4967 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297763 4967 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297767 4967 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297771 4967 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297775 4967 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297780 4967 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297784 4967 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297787 4967 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297791 4967 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297795 4967 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297798 4967 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297802 4967 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297806 4967 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297809 4967 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297813 4967 feature_gate.go:330] unrecognized feature gate: Example
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297816 4967 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297820 4967 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297823 4967 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297828 4967 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297831 4967 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297835 4967 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297838 4967 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297843 4967 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297847 4967 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297851 4967 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297854 4967 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297857 4967 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297861 4967 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297865 4967 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297869 4967 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297873 4967 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297877 4967 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297881 4967 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297885 4967 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297889 4967 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297892 4967 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297896 4967 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297899 4967 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.297903 4967 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.297909 4967 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.298062 4967 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.303563 4967 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.303661 4967 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.305980 4967 server.go:997] "Starting client certificate rotation"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.306021 4967 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.307499 4967 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-04 09:34:06.617759692 +0000 UTC
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.307566 4967 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 1049h58m54.310196689s for next certificate rotation
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.333639 4967 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.335635 4967 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.357304 4967 log.go:25] "Validated CRI v1 runtime API"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.394263 4967 log.go:25] "Validated CRI v1 image API"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.398323 4967 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.403680 4967 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-21-15-30-37-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.403721 4967 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:41 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:42 fsType:tmpfs blockSize:0}]
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.423546 4967 manager.go:217] Machine: {Timestamp:2025-11-21 15:35:12.417881169 +0000 UTC m=+0.676402207 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654132736 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:edc01b1f-a566-4bc8-990a-79924b529553 BootID:aa4afe05-81a7-4f49-b297-5ea173e99e75 Filesystems:[{Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827068416 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:41 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:42 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730829824 Type:vfs Inodes:819200 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:dd:07:a9 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:dd:07:a9 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:b3:81:ec Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:cf:90:a1 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:dc:bf:f1 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:cf:d0:14 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:6a:19:05:a4:ed:19 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:9e:53:06:11:b9:46 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654132736 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.423830 4967 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.423991 4967 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.424326 4967 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.424545 4967 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.424586 4967 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.424799 4967 topology_manager.go:138] "Creating topology manager with none policy"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.424809 4967 container_manager_linux.go:303] "Creating device plugin manager"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.425285 4967 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.425367 4967 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.426254 4967 state_mem.go:36] "Initialized new in-memory state store"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.426667 4967 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.432304 4967 kubelet.go:418] "Attempting to sync node with API server"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.432402 4967 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.432426 4967 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.432449 4967 kubelet.go:324] "Adding apiserver pod source"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.432471 4967 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.438454 4967 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.440438 4967 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.444432 4967 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.445485 4967 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.445494 4967 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused
Nov 21 15:35:12 crc kubenswrapper[4967]: E1121 15:35:12.445624 4967 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" logger="UnhandledError"
Nov 21 15:35:12 crc kubenswrapper[4967]: E1121 15:35:12.445625 4967 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" logger="UnhandledError"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.446422 4967 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.446456 4967 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.446466 4967 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.446475 4967 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.446489 4967 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.446498 4967 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.446506 4967 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.446521 4967 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.446530 4967 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.446539 4967 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.446551 4967 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.446559 4967 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.450778 4967 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.452104 4967 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.452341 4967 server.go:1280] "Started kubelet"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.453027 4967 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.453024 4967 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.453648 4967 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.453886 4967 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.453912 4967 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.454033 4967 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.454080 4967 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 21 15:35:12 crc systemd[1]: Started Kubernetes Kubelet.
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.454123 4967 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 22:02:04.435335592 +0000 UTC
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.454189 4967 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 654h26m51.981150029s for next certificate rotation
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.454228 4967 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 21 15:35:12 crc kubenswrapper[4967]: E1121 15:35:12.454034 4967 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.454522 4967 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused
Nov 21 15:35:12 crc kubenswrapper[4967]: E1121 15:35:12.454582 4967 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" logger="UnhandledError"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.455393 4967 factory.go:55] Registering systemd factory
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.455421 4967 factory.go:221] Registration of the systemd container factory successfully
Nov 21 15:35:12 crc kubenswrapper[4967]: E1121 15:35:12.455623 4967 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" interval="200ms"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.456128 4967 factory.go:153] Registering CRI-O factory
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.456191 4967 factory.go:221] Registration of the crio container factory successfully
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.456411 4967 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.456454 4967 factory.go:103] Registering Raw factory
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.456479 4967 manager.go:1196] Started watching for new ooms in manager
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.457857 4967 manager.go:319] Starting recovery of all containers
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.458127 4967 server.go:460] "Adding debug handlers to kubelet server"
Nov 21 15:35:12 crc kubenswrapper[4967]: E1121 15:35:12.460979 4967 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.148:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187a0f8f5b1c3b8d default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-21 15:35:12.452287373 +0000 UTC m=+0.710808381,LastTimestamp:2025-11-21 15:35:12.452287373 +0000 UTC m=+0.710808381,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464539 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464623 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464653 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464668 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464679 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464688 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464697 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464707 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464735 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464746 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464757 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464765 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464777 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464787 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464817 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464832 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464847 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464857 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464867 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464909 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464933 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464944 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464956 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.464988 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465000 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465010 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465026 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465035 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465065 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465075 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465083 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465578 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465591 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465599 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465608 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465618 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465628 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465639 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465649 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465659 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465668 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465678 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465688 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465697 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465706 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465718 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465727 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465737 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465747 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465756 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465765 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465776 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465789 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465833 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465844 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465853 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465862 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465872 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465882 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465891 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465922 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465933 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465942 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465951 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465966 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465974 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465983 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.465991 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466001 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466009 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466019 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466027 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466035 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466045 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466054 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466062 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466070 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466078 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466086 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466094 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466103 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466110 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466120 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466140 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466157 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466168 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466180 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466193 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466201 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466209 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.466219 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.469803 4967 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.469955 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.470026 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.470093 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.470159 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.470221 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.470920 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.470989 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471021 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471035 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471048 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471071 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471086 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471102 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471135 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471164 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471186 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471496 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471513 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471536 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471649 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471683 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471707 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471722 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471738 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471751 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471767 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471784 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471799 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471817 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471941 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471964 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471982 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.471998 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472017 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472034 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472069 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472114 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472138 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472159 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472174 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472187 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472206 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472222 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472239 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472280 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472294 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472326 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472338 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472355 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472368 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b"
volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472389 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472476 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472493 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472526 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472537 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472548 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472656 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472669 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472707 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472853 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472950 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" 
volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.472967 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.473377 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.473406 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.473500 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.473519 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.473664 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.473680 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.473737 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.473778 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.473987 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.474006 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" 
volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.474023 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.474544 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.474566 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.474665 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.474804 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.474896 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.474936 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.474949 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.474963 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.474973 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.480924 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" 
volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.481085 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.481162 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.481336 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484482 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484576 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484593 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484607 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484621 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484635 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484650 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484691 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" 
volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484716 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484739 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484755 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484772 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484788 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484801 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484814 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484828 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484843 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484856 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484869 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484884 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484897 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484911 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484922 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484934 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484949 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484964 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484976 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.484989 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.485001 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.485012 4967 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" 
volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.485026 4967 reconstruct.go:97] "Volume reconstruction finished" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.485036 4967 reconciler.go:26] "Reconciler: start to sync state" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.489217 4967 manager.go:324] Recovery completed Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.497535 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.499053 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.499101 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.499113 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.499960 4967 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.499981 4967 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.499999 4967 state_mem.go:36] "Initialized new in-memory state store" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.532951 4967 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.534874 4967 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.534931 4967 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.534975 4967 kubelet.go:2335] "Starting kubelet main sync loop" Nov 21 15:35:12 crc kubenswrapper[4967]: E1121 15:35:12.535041 4967 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 21 15:35:12 crc kubenswrapper[4967]: W1121 15:35:12.552480 4967 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused Nov 21 15:35:12 crc kubenswrapper[4967]: E1121 15:35:12.553446 4967 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" logger="UnhandledError" Nov 21 15:35:12 crc kubenswrapper[4967]: E1121 15:35:12.554413 4967 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 21 15:35:12 crc kubenswrapper[4967]: E1121 15:35:12.635766 4967 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Nov 21 15:35:12 crc kubenswrapper[4967]: E1121 15:35:12.655151 4967 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 21 15:35:12 crc 
Nov 21 15:35:12 crc kubenswrapper[4967]: E1121 15:35:12.656815 4967 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" interval="400ms"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.708748 4967 policy_none.go:49] "None policy: Start"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.710791 4967 memory_manager.go:170] "Starting memorymanager" policy="None"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.710853 4967 state_mem.go:35] "Initializing new in-memory state store"
Nov 21 15:35:12 crc kubenswrapper[4967]: E1121 15:35:12.755289 4967 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.802738 4967 manager.go:334] "Starting Device Plugin manager"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.802875 4967 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.802889 4967 server.go:79] "Starting device plugin registration server"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.803280 4967 eviction_manager.go:189] "Eviction manager: starting control loop"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.803303 4967 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.803418 4967 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.803568 4967 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.803590 4967 plugin_manager.go:118] "Starting Kubelet Plugin Manager"
Nov 21 15:35:12 crc kubenswrapper[4967]: E1121 15:35:12.813210 4967 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.836856 4967 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.836997 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.839299 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.839348 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.839358 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.839516 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
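
[Editor's note — the "SyncLoop ADD" entry above has source="file": with the API server still unreachable, the five control-plane pods enter the kubelet as static pods read from manifest files on disk. A sketch of that file-source scan, assuming the conventional /etc/kubernetes/manifests directory (the log does not print the configured staticPodPath):]

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	// Assumed manifest directory; the kubelet's file source watches a
	// directory like this and turns each manifest into a static pod.
	dir := "/etc/kubernetes/manifests"
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Fprintln(os.Stderr, "cannot read manifest dir:", err)
		return
	}
	for _, e := range entries {
		if name := e.Name(); strings.HasSuffix(name, ".yaml") || strings.HasSuffix(name, ".json") {
			fmt.Println("static pod manifest:", filepath.Join(dir, name))
		}
	}
}
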
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.839747 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.840766 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.840785 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.840793 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.840902 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.840923 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.840976 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.841016 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.841595 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.841634 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.841928 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.841964 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.841975 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.842167 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.842471 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.842528 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.843079 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.843097 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.843105 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.843167 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.843192 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.843201 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.843302 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.843620 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.843646 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.843956 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.843986 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.843971 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.844034 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.844093 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.844116 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.844525 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.844566 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.844830 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.844916 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.844928 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.845548 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.845575 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.845584 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.890745 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.890807 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.890835 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.890866 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.890894 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.890925 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " 
pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.890961 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.891078 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.891211 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.891253 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.891336 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.891376 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.891395 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.891420 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.891464 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.903688 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.904842 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.904877 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.904897 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.904957 4967 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 21 15:35:12 crc kubenswrapper[4967]: E1121 15:35:12.905438 4967 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.148:6443: connect: connection refused" node="crc"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.992936 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993018 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993044 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993058 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993078 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993104 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
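
[Editor's note — "Unable to register node with API server ... connect: connection refused" is expected at this point: the kubelet comes up before the kube-apiserver static pod it is in the middle of launching. A quick way to reproduce the failing dial against the log's own endpoint, sketched with the Go standard library:]

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Endpoint copied from the failing requests in the log above.
	conn, err := net.DialTimeout("tcp", "api-int.crc.testing:6443", 2*time.Second)
	if err != nil {
		// At this stage of startup this prints "connection refused".
		fmt.Println("apiserver not reachable yet:", err)
		return
	}
	conn.Close()
	fmt.Println("apiserver reachable")
}
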
\"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993148 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993155 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993174 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993196 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993196 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993231 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993257 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993273 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993273 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993198 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" 
(UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993302 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993368 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993276 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993394 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993372 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993361 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993370 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993279 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993342 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993487 4967 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993554 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993592 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 15:35:12 crc kubenswrapper[4967]: I1121 15:35:12.993699 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 15:35:13 crc kubenswrapper[4967]: E1121 15:35:13.057751 4967 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" interval="800ms" Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.105645 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.107029 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.107082 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.107093 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.107133 4967 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 21 15:35:13 crc kubenswrapper[4967]: E1121 15:35:13.107755 4967 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.148:6443: connect: connection refused" node="crc" Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.174605 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.197945 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.212382 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 15:35:13 crc kubenswrapper[4967]: W1121 15:35:13.218529 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-34d6594aca7ece027f9f6957fecb7e26c5a24841bf86a891edba554c728f5b51 WatchSource:0}: Error finding container 34d6594aca7ece027f9f6957fecb7e26c5a24841bf86a891edba554c728f5b51: Status 404 returned error can't find the container with id 34d6594aca7ece027f9f6957fecb7e26c5a24841bf86a891edba554c728f5b51 Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.228794 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 15:35:13 crc kubenswrapper[4967]: W1121 15:35:13.230543 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-b5fe90a1c92d2570438572b0a9036ddc5d5fc11f9a421854a455e610624c381c WatchSource:0}: Error finding container b5fe90a1c92d2570438572b0a9036ddc5d5fc11f9a421854a455e610624c381c: Status 404 returned error can't find the container with id b5fe90a1c92d2570438572b0a9036ddc5d5fc11f9a421854a455e610624c381c Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.235813 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 15:35:13 crc kubenswrapper[4967]: W1121 15:35:13.239399 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-d39c469917acdc36ff3a26d29c4dd5b90e911f240073d87b3700dea11306ad18 WatchSource:0}: Error finding container d39c469917acdc36ff3a26d29c4dd5b90e911f240073d87b3700dea11306ad18: Status 404 returned error can't find the container with id d39c469917acdc36ff3a26d29c4dd5b90e911f240073d87b3700dea11306ad18 Nov 21 15:35:13 crc kubenswrapper[4967]: W1121 15:35:13.243727 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-b360b6d0d407c28656444eb21a3cf8a7833232b855bc838adbb3a863f5df2045 WatchSource:0}: Error finding container b360b6d0d407c28656444eb21a3cf8a7833232b855bc838adbb3a863f5df2045: Status 404 returned error can't find the container with id b360b6d0d407c28656444eb21a3cf8a7833232b855bc838adbb3a863f5df2045 Nov 21 15:35:13 crc kubenswrapper[4967]: W1121 15:35:13.260804 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-b848024c24877d03c9d8c371f8ab040b9524fbb31eff7c2733f4e365a318b795 WatchSource:0}: Error finding container b848024c24877d03c9d8c371f8ab040b9524fbb31eff7c2733f4e365a318b795: Status 404 returned error can't find the container with id b848024c24877d03c9d8c371f8ab040b9524fbb31eff7c2733f4e365a318b795 Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.453454 4967 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused Nov 21 15:35:13 crc kubenswrapper[4967]: W1121 15:35:13.460563 4967 reflector.go:561] 
k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused Nov 21 15:35:13 crc kubenswrapper[4967]: E1121 15:35:13.460637 4967 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" logger="UnhandledError" Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.508796 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.510636 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.510699 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.510709 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.510741 4967 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 21 15:35:13 crc kubenswrapper[4967]: E1121 15:35:13.511271 4967 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.148:6443: connect: connection refused" node="crc" Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.539344 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"b848024c24877d03c9d8c371f8ab040b9524fbb31eff7c2733f4e365a318b795"} Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.540505 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b360b6d0d407c28656444eb21a3cf8a7833232b855bc838adbb3a863f5df2045"} Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.541501 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d39c469917acdc36ff3a26d29c4dd5b90e911f240073d87b3700dea11306ad18"} Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.542382 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b5fe90a1c92d2570438572b0a9036ddc5d5fc11f9a421854a455e610624c381c"} Nov 21 15:35:13 crc kubenswrapper[4967]: I1121 15:35:13.544353 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"34d6594aca7ece027f9f6957fecb7e26c5a24841bf86a891edba554c728f5b51"} Nov 21 15:35:13 crc kubenswrapper[4967]: W1121 15:35:13.600481 4967 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused Nov 21 15:35:13 crc kubenswrapper[4967]: E1121 15:35:13.600575 4967 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" logger="UnhandledError" Nov 21 15:35:13 crc kubenswrapper[4967]: W1121 15:35:13.664858 4967 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused Nov 21 15:35:13 crc kubenswrapper[4967]: E1121 15:35:13.664938 4967 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" logger="UnhandledError" Nov 21 15:35:13 crc kubenswrapper[4967]: E1121 15:35:13.858777 4967 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" interval="1.6s" Nov 21 15:35:13 crc kubenswrapper[4967]: W1121 15:35:13.869196 4967 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused Nov 21 15:35:13 crc kubenswrapper[4967]: E1121 15:35:13.869405 4967 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" logger="UnhandledError" Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.312237 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.314488 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.314530 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.314539 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.314571 4967 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 21 15:35:14 crc kubenswrapper[4967]: E1121 15:35:14.315144 4967 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.148:6443: connect: connection refused" node="crc" Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.453503 4967 csi_plugin.go:884] Failed to contact API server when waiting 
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.550135 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090"}
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.550179 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c"}
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.550193 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb"}
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.551743 4967 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f" exitCode=0
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.556697 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f"}
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.559319 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.560936 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.560972 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.560981 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.561174 4967 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="f6c055732cc47d59e7fd200ac9b70b70bbece54eac14ca4db1bcbf765cab9c2c" exitCode=0
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.561269 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"f6c055732cc47d59e7fd200ac9b70b70bbece54eac14ca4db1bcbf765cab9c2c"}
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.561293 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.562296 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.562337 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.562351 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.562831 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.563709 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.563753 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.563767 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.564437 4967 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="ffb7b3741903cd837195627440e1c4436f00b155998f350b4783615ade3a0bcb" exitCode=0
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.564462 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"ffb7b3741903cd837195627440e1c4436f00b155998f350b4783615ade3a0bcb"}
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.564548 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.565764 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.565797 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.565809 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.567557 4967 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6" exitCode=0
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.567593 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6"}
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.567757 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.568559 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.568590 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:14 crc kubenswrapper[4967]: I1121 15:35:14.568601 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.453662 4967 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused
Nov 21 15:35:15 crc kubenswrapper[4967]: E1121 15:35:15.460669 4967 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.148:6443: connect: connection refused" interval="3.2s"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.573418 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97"}
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.573481 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.574454 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.574481 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.574489 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.577185 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"4a0cd640a05a49878fe715757235bf013c68a378811d99cc08c97074c5912c6a"}
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.577234 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6"}
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.577251 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f"}
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.577264 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5"}
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.577277 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f"}
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.577288 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.580729 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.580781 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.580794 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.582394 4967 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="d9978082d4051bfd7e5fc959d6c6f6440abcbc33914499790531c323a708d837" exitCode=0
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.582508 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"d9978082d4051bfd7e5fc959d6c6f6440abcbc33914499790531c323a708d837"}
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.582657 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.583515 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.583556 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.583579 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.584930 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.585049 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"676c95cb57812034e79459f98d501941a5b31d61ac91866badff8869a367711b"}
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.585870 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.585901 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.585909 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.587851 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"67676d67141387f36bdc8d39929bc2992c37ec12d9b3c1553f8a043e30a39d0c"}
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.587887 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"1812d7c4dec38deedb6295479db0a8c84ddd96a8e41d191b381582c01318c3ed"}
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.587898 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"faa5cf8d5f0aa74c72abd3fe2c01372b3089066842b87cf74df4f9accde84fcb"}
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.587985 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.590285 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
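The "Failed to ensure lease exists, will retry" errors threaded through this section are the kubelet's node heartbeat: it keeps a coordination.k8s.io Lease named after the node in the kube-node-lease namespace and bumps its renewTime on every cycle. A minimal sketch, assuming client-go (the field values here are illustrative, not the kubelet's actual defaults):

    package leasesketch

    import (
        "context"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/utils/ptr"
    )

    // renewNodeLease sketches the heartbeat behind the log's lease errors:
    // fetch the node's Lease and bump spec.renewTime. When the GET fails with
    // "connection refused", the kubelet logs the error and retries after the
    // printed interval.
    func renewNodeLease(cs kubernetes.Interface, node string) error {
        leases := cs.CoordinationV1().Leases("kube-node-lease")
        lease, err := leases.Get(context.TODO(), node, metav1.GetOptions{})
        if err != nil {
            return err // the path taken throughout this section of the log
        }
        now := metav1.NewMicroTime(time.Now())
        lease.Spec.RenewTime = &now
        lease.Spec.LeaseDurationSeconds = ptr.To(int32(40)) // illustrative duration
        _, err = leases.Update(context.TODO(), lease, metav1.UpdateOptions{})
        return err
    }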
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.590326 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.590339 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:15 crc kubenswrapper[4967]: W1121 15:35:15.785007 4967 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused
Nov 21 15:35:15 crc kubenswrapper[4967]: E1121 15:35:15.785129 4967 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" logger="UnhandledError"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.915940 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.917238 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.917264 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.917274 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:15 crc kubenswrapper[4967]: I1121 15:35:15.917296 4967 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 21 15:35:15 crc kubenswrapper[4967]: E1121 15:35:15.917659 4967 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.148:6443: connect: connection refused" node="crc"
Nov 21 15:35:16 crc kubenswrapper[4967]: W1121 15:35:16.331502 4967 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused
Nov 21 15:35:16 crc kubenswrapper[4967]: E1121 15:35:16.331914 4967 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" logger="UnhandledError"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.453589 4967 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.591865 4967 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="3956d9aa538fb9e16dbe6226a7b3312bb3c3708f878afbfc3202768a9572c1f9" exitCode=0
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.591992 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.592044 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.592091 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"3956d9aa538fb9e16dbe6226a7b3312bb3c3708f878afbfc3202768a9572c1f9"}
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.592148 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.592213 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.592270 4967 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.592336 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.592613 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.592879 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.592911 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.592925 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.593165 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.593191 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.593200 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.593507 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.593589 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.593611 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.593613 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.593750 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.593763 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.594027 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.594077 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:16 crc kubenswrapper[4967]: I1121 15:35:16.594093 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:16 crc kubenswrapper[4967]: W1121 15:35:16.760657 4967 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused
Nov 21 15:35:16 crc kubenswrapper[4967]: E1121 15:35:16.760753 4967 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" logger="UnhandledError"
Nov 21 15:35:16 crc kubenswrapper[4967]: W1121 15:35:16.765459 4967 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.148:6443: connect: connection refused
Nov 21 15:35:16 crc kubenswrapper[4967]: E1121 15:35:16.765521 4967 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.148:6443: connect: connection refused" logger="UnhandledError"
Nov 21 15:35:17 crc kubenswrapper[4967]: I1121 15:35:17.412587 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 21 15:35:17 crc kubenswrapper[4967]: I1121 15:35:17.600078 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f850992b94227da13753a76e85a1911d5f3bd0bbd731b2695d40764f00233974"}
Nov 21 15:35:17 crc kubenswrapper[4967]: I1121 15:35:17.600117 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e5abf260b8d0b26a368876ba98a0a2d4651fa981eb07021f0cea886520ce2fb5"}
Nov 21 15:35:17 crc kubenswrapper[4967]: I1121 15:35:17.600129 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f135bb4f696f2ef5da8aae2d8263223226d54bce18777af711d07b200c52af43"}
Nov 21 15:35:17 crc kubenswrapper[4967]: I1121 15:35:17.600138 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"17c903ad4751e09495ae5e924f4b58f098e10f97dd1acc687dad738648f5ac6d"}
Nov 21 15:35:17 crc kubenswrapper[4967]: I1121 15:35:17.600146 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"bbb88ef84ad920e2e33db2a1bac9146c7c118a44439c6c5efe251ac726b5555b"}
Nov 21 15:35:17 crc kubenswrapper[4967]: I1121 15:35:17.600160 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:17 crc kubenswrapper[4967]: I1121 15:35:17.600925 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:17 crc kubenswrapper[4967]: I1121 15:35:17.600948 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:17 crc kubenswrapper[4967]: I1121 15:35:17.600957 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:17 crc kubenswrapper[4967]: I1121 15:35:17.602160 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 21 15:35:17 crc kubenswrapper[4967]: I1121 15:35:17.603518 4967 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="4a0cd640a05a49878fe715757235bf013c68a378811d99cc08c97074c5912c6a" exitCode=255
Nov 21 15:35:17 crc kubenswrapper[4967]: I1121 15:35:17.603557 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"4a0cd640a05a49878fe715757235bf013c68a378811d99cc08c97074c5912c6a"}
Nov 21 15:35:17 crc kubenswrapper[4967]: I1121 15:35:17.603627 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:17 crc kubenswrapper[4967]: I1121 15:35:17.604301 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:17 crc kubenswrapper[4967]: I1121 15:35:17.604364 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:17 crc kubenswrapper[4967]: I1121 15:35:17.604377 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:17 crc kubenswrapper[4967]: I1121 15:35:17.604928 4967 scope.go:117] "RemoveContainer" containerID="4a0cd640a05a49878fe715757235bf013c68a378811d99cc08c97074c5912c6a"
Nov 21 15:35:18 crc kubenswrapper[4967]: I1121 15:35:18.609838 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 21 15:35:18 crc kubenswrapper[4967]: I1121 15:35:18.611546 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:18 crc kubenswrapper[4967]: I1121 15:35:18.612010 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:18 crc kubenswrapper[4967]: I1121 15:35:18.612303 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d"}
Nov 21 15:35:18 crc kubenswrapper[4967]: I1121 15:35:18.612754 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:18 crc kubenswrapper[4967]: I1121 15:35:18.612778 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:18 crc kubenswrapper[4967]: I1121 15:35:18.612787 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:18 crc kubenswrapper[4967]: I1121 15:35:18.613410 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:18 crc kubenswrapper[4967]: I1121 15:35:18.613429 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:18 crc kubenswrapper[4967]: I1121 15:35:18.613437 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:18 crc kubenswrapper[4967]: I1121 15:35:18.718180 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Nov 21 15:35:18 crc kubenswrapper[4967]: I1121 15:35:18.908005 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 21 15:35:19 crc kubenswrapper[4967]: I1121 15:35:19.118250 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:19 crc kubenswrapper[4967]: I1121 15:35:19.119664 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:19 crc kubenswrapper[4967]: I1121 15:35:19.119710 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:19 crc kubenswrapper[4967]: I1121 15:35:19.119725 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:19 crc kubenswrapper[4967]: I1121 15:35:19.119754 4967 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 21 15:35:19 crc kubenswrapper[4967]: I1121 15:35:19.614488 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:19 crc kubenswrapper[4967]: I1121 15:35:19.614511 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 21 15:35:19 crc kubenswrapper[4967]: I1121 15:35:19.614473 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 21 15:35:19 crc kubenswrapper[4967]: I1121 15:35:19.615724 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:19 crc kubenswrapper[4967]: I1121 15:35:19.615757 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:19 crc kubenswrapper[4967]: I1121 15:35:19.615768 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:19 crc kubenswrapper[4967]: I1121 15:35:19.616614 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:19 crc kubenswrapper[4967]: I1121 15:35:19.616639 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:19 crc kubenswrapper[4967]: I1121 15:35:19.616649 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:19 crc kubenswrapper[4967]: I1121 15:35:19.970749 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 15:35:19 crc kubenswrapper[4967]: I1121 15:35:19.971009 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:19 crc kubenswrapper[4967]: I1121 15:35:19.972486 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:19 crc kubenswrapper[4967]: I1121 15:35:19.972540 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:19 crc kubenswrapper[4967]: I1121 15:35:19.972558 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:20 crc kubenswrapper[4967]: I1121 15:35:20.055665 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 15:35:20 crc kubenswrapper[4967]: I1121 15:35:20.055924 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:20 crc kubenswrapper[4967]: I1121 15:35:20.057761 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:20 crc kubenswrapper[4967]: I1121 15:35:20.057797 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:20 crc kubenswrapper[4967]: I1121 15:35:20.057809 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:20 crc kubenswrapper[4967]: I1121 15:35:20.616804 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:20 crc kubenswrapper[4967]: I1121 15:35:20.617977 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:20 crc kubenswrapper[4967]: I1121 15:35:20.618031 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:20 crc kubenswrapper[4967]: I1121 15:35:20.618048 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:20 crc kubenswrapper[4967]: I1121 15:35:20.643666 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 21 15:35:20 crc kubenswrapper[4967]: I1121 15:35:20.643901 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:20 crc kubenswrapper[4967]: I1121 15:35:20.645171 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:20 crc kubenswrapper[4967]: I1121 15:35:20.645213 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:20 crc kubenswrapper[4967]: I1121 15:35:20.645225 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:22 crc kubenswrapper[4967]: E1121 15:35:22.813337 4967 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 21 15:35:23 crc 
kubenswrapper[4967]: I1121 15:35:23.444989 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 15:35:23 crc kubenswrapper[4967]: I1121 15:35:23.445457 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:23 crc kubenswrapper[4967]: I1121 15:35:23.447382 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:23 crc kubenswrapper[4967]: I1121 15:35:23.447569 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:23 crc kubenswrapper[4967]: I1121 15:35:23.447657 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:24 crc kubenswrapper[4967]: I1121 15:35:24.747977 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 15:35:24 crc kubenswrapper[4967]: I1121 15:35:24.748242 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:24 crc kubenswrapper[4967]: I1121 15:35:24.749844 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:24 crc kubenswrapper[4967]: I1121 15:35:24.749922 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:24 crc kubenswrapper[4967]: I1121 15:35:24.749940 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:24 crc kubenswrapper[4967]: I1121 15:35:24.755733 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 15:35:24 crc kubenswrapper[4967]: I1121 15:35:24.806087 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 15:35:25 crc kubenswrapper[4967]: I1121 15:35:25.630151 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:25 crc kubenswrapper[4967]: I1121 15:35:25.631563 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:25 crc kubenswrapper[4967]: I1121 15:35:25.631707 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:25 crc kubenswrapper[4967]: I1121 15:35:25.631796 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:25 crc kubenswrapper[4967]: I1121 15:35:25.634964 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 15:35:26 crc kubenswrapper[4967]: I1121 15:35:26.345635 4967 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 21 15:35:26 crc kubenswrapper[4967]: 
I1121 15:35:26.345709 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 21 15:35:26 crc kubenswrapper[4967]: I1121 15:35:26.350097 4967 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 21 15:35:26 crc kubenswrapper[4967]: I1121 15:35:26.350139 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 21 15:35:26 crc kubenswrapper[4967]: I1121 15:35:26.632404 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:26 crc kubenswrapper[4967]: I1121 15:35:26.633407 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:26 crc kubenswrapper[4967]: I1121 15:35:26.633448 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:26 crc kubenswrapper[4967]: I1121 15:35:26.633458 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:27 crc kubenswrapper[4967]: I1121 15:35:27.419367 4967 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]log ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]etcd ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/openshift.io-startkubeinformers ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/openshift.io-api-request-count-filter ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/generic-apiserver-start-informers ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/priority-and-fairness-config-consumer ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/priority-and-fairness-filter ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/start-apiextensions-informers ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/start-apiextensions-controllers ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/crd-informer-synced ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/start-system-namespaces-controller ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/start-cluster-authentication-info-controller 
ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/start-legacy-token-tracking-controller ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/start-service-ip-repair-controllers ok Nov 21 15:35:27 crc kubenswrapper[4967]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/scheduling/bootstrap-system-priority-classes ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/priority-and-fairness-config-producer ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/bootstrap-controller ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/aggregator-reload-proxy-client-cert ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/start-kube-aggregator-informers ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/apiservice-status-local-available-controller ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/apiservice-status-remote-available-controller ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/apiservice-registration-controller ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/apiservice-wait-for-first-sync ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/apiservice-discovery-controller ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/kube-apiserver-autoregistration ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]autoregister-completion ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/apiservice-openapi-controller ok Nov 21 15:35:27 crc kubenswrapper[4967]: [+]poststarthook/apiservice-openapiv3-controller ok Nov 21 15:35:27 crc kubenswrapper[4967]: livez check failed Nov 21 15:35:27 crc kubenswrapper[4967]: I1121 15:35:27.419923 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 15:35:27 crc kubenswrapper[4967]: I1121 15:35:27.634439 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:27 crc kubenswrapper[4967]: I1121 15:35:27.635533 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:27 crc kubenswrapper[4967]: I1121 15:35:27.635588 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:27 crc kubenswrapper[4967]: I1121 15:35:27.635600 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:27 crc kubenswrapper[4967]: I1121 15:35:27.807549 4967 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 21 15:35:27 crc kubenswrapper[4967]: I1121 15:35:27.807621 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" 
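The probe failures above trace the apiserver coming up in stages: first the probe is rejected outright with 403 because it arrives as system:anonymous and the RBAC bootstrap roles do not exist yet, then /livez answers 500 with exactly one hook still failing ([-]poststarthook/rbac/bootstrap-roles). A minimal sketch of hand-checking /livez the same way, using only the Go standard library; the token placeholder is hypothetical and TLS verification is skipped purely for the sketch:

    package main

    import (
        "crypto/tls"
        "fmt"
        "net/http"
    )

    func main() {
        client := &http.Client{Transport: &http.Transport{
            // Demo only: skip cert verification against the CRC self-signed CA.
            TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
        }}
        req, err := http.NewRequest("GET", "https://api-int.crc.testing:6443/livez", nil)
        if err != nil {
            panic(err)
        }
        // Without this header the request is system:anonymous -> 403, as above.
        req.Header.Set("Authorization", "Bearer "+"<token>") // hypothetical token
        resp, err := client.Do(req)
        if err != nil {
            fmt.Println("dial error:", err) // the "connection refused" phase of the log
            return
        }
        defer resp.Body.Close()
        fmt.Println("livez:", resp.Status) // 403 -> 500 -> 200 as bootstrap completes
    }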
containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 21 15:35:28 crc kubenswrapper[4967]: I1121 15:35:28.752869 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 21 15:35:28 crc kubenswrapper[4967]: I1121 15:35:28.753171 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:28 crc kubenswrapper[4967]: I1121 15:35:28.756869 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:28 crc kubenswrapper[4967]: I1121 15:35:28.756932 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:28 crc kubenswrapper[4967]: I1121 15:35:28.756945 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:28 crc kubenswrapper[4967]: I1121 15:35:28.775426 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 21 15:35:29 crc kubenswrapper[4967]: I1121 15:35:29.641181 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:29 crc kubenswrapper[4967]: I1121 15:35:29.642532 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:29 crc kubenswrapper[4967]: I1121 15:35:29.642682 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:29 crc kubenswrapper[4967]: I1121 15:35:29.642762 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:31 crc kubenswrapper[4967]: E1121 15:35:31.319691 4967 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.321749 4967 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.324398 4967 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 21 15:35:31 crc kubenswrapper[4967]: E1121 15:35:31.331879 4967 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.332871 4967 trace.go:236] Trace[1262074478]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (21-Nov-2025 15:35:20.295) (total time: 11036ms): Nov 21 15:35:31 crc kubenswrapper[4967]: Trace[1262074478]: ---"Objects listed" error: 11036ms (15:35:31.332) Nov 21 15:35:31 crc kubenswrapper[4967]: Trace[1262074478]: [11.036985817s] [11.036985817s] END Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.332912 4967 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.333294 4967 reflector.go:368] Caches populated for *v1.Node from 
k8s.io/client-go/informers/factory.go:160 Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.334721 4967 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.443967 4967 apiserver.go:52] "Watching apiserver" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.454172 4967 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.454600 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf"] Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.455819 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.456008 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.456071 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:35:31 crc kubenswrapper[4967]: E1121 15:35:31.456183 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.456274 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.456329 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:35:31 crc kubenswrapper[4967]: E1121 15:35:31.456373 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:35:31 crc kubenswrapper[4967]: E1121 15:35:31.456388 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.456290 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.458560 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.458747 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.459007 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.459193 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.459305 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.459349 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.459803 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.459894 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.459939 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.497178 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.512695 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.525556 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.537205 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.548803 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.555626 4967 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.559802 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.575530 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.635956 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.636059 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.636494 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.636497 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.636094 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.636583 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.636838 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.637725 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.637876 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.637904 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.638392 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.638453 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.638475 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.638754 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.638808 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.638834 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.639085 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.639158 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.639109 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.639183 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.639225 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.639246 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.639544 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.639609 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.639654 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.639672 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.639902 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.640042 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.640080 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.640112 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.640131 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.640317 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.640655 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.640738 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.640798 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.640851 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.640874 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.640996 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.641331 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.641414 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.641469 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.641490 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.641411 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.641509 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.641511 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.641588 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.641618 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.641643 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.641665 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.641690 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.641711 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.641733 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.641756 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.642008 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") 
pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.642064 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.642078 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.642260 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.642326 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.642414 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.642440 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.642497 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.642510 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". 
PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.642664 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.642762 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.642767 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.642804 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.642832 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.642925 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.642947 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643020 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.642959 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643035 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643061 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643073 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643111 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643139 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643163 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643181 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643201 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643258 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643287 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643351 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643402 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643419 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643439 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643424 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643496 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643524 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643582 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643593 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643746 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643829 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643612 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644003 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644030 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644050 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644068 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644093 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644135 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644155 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644176 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644200 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644210 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644220 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644284 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644324 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644347 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644371 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644392 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644421 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644445 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644470 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644492 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644518 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644550 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644573 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644597 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.643997 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644360 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644432 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644544 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644640 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644674 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644760 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644802 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644842 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644901 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.645167 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.645329 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.645468 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.645466 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.645818 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.645849 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.645934 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.644619 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646246 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646253 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646330 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646345 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646466 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646497 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646501 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646504 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646587 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646605 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646631 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646638 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646652 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646680 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646704 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646734 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646762 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646793 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646814 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646823 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646856 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646898 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646929 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646951 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646958 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646972 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.646990 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647008 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647026 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647046 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647064 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647090 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647109 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647126 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647144 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647161 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647179 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647185 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647202 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647226 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647246 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647279 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647333 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647358 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647382 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647399 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647418 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647435 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647453 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647476 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647495 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647514 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647516 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647533 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647563 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647588 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647610 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647630 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647652 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647673 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647691 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647713 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647798 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647823 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647516 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647617 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647732 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647665 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647949 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647967 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.650370 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.650370 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.650593 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.650475 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.650585 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.650642 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.650821 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.650923 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.651135 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.651708 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.652118 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.652132 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.652739 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.652999 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.653241 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.653430 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.653498 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.653518 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.653588 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.653591 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.653768 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.653974 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.653797 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.653865 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654105 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.647843 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654350 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654363 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654401 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654429 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654449 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654470 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654492 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654517 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654540 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654560 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654580 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod
\"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654600 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654617 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654637 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654805 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654816 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654831 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654852 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654875 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654895 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654913 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654930 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654948 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654966 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654987 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655006 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655025 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655044 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655064 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655081 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655098 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655116 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655133 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655152 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655168 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655184 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655206 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655298 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655339 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655358 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655376 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655396 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655414 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655437 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655455 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:35:31 crc kubenswrapper[4967]: 
I1121 15:35:31.655472 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655491 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655511 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655527 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655543 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655595 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655612 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655628 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655647 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655669 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" 
(UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655689 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655708 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655731 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655750 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655769 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655788 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655806 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655823 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654843 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.654955 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655298 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.656100 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.656269 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.656335 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.656483 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.656738 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.657125 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.657243 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.657620 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.657778 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.657872 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.657945 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: E1121 15:35:31.658057 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:35:32.158030868 +0000 UTC m=+20.416551876 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.658334 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.658550 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.655840 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.658621 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.658649 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.658703 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.658750 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.658839 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.658889 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.658925 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: 
\"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.658956 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.659033 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.659067 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.659139 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.659171 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.659175 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.659184 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.659203 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.659231 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.659259 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.659283 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.659324 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.659353 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.659476 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660031 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660350 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660427 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660456 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660473 4967 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660489 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660503 4967 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660518 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660533 4967 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660548 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660564 4967 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660579 4967 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660598 4967 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660616 4967 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660631 4967 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660646 4967 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660661 4967 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660673 4967 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660687 4967 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660700 4967 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660725 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660742 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660755 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660768 4967 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660782 4967 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660795 4967 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660808 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: 
\"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660822 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660839 4967 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660853 4967 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660870 4967 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660884 4967 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660899 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660948 4967 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660964 4967 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660981 4967 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660995 4967 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661009 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661025 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661039 4967 reconciler_common.go:293] "Volume 
detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661052 4967 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661065 4967 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661078 4967 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661090 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661102 4967 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661114 4967 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661127 4967 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661139 4967 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661151 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661164 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661179 4967 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661193 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661208 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661222 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661235 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661250 4967 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661264 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661278 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661292 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661304 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661334 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661346 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661358 4967 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661370 4967 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661380 4967 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661390 4967 
reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661402 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661413 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661422 4967 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661437 4967 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661449 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661464 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661474 4967 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661484 4967 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661500 4967 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661510 4967 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661521 4967 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661532 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661542 4967 
reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661552 4967 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661562 4967 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661572 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661583 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661594 4967 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661606 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661616 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661625 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661638 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661648 4967 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661657 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661670 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.662449 4967 
reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.662483 4967 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.662511 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.662533 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.663657 4967 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665445 4967 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665489 4967 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665510 4967 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665608 4967 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665630 4967 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665653 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665674 4967 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665698 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665717 4967 reconciler_common.go:293] "Volume detached for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665737 4967 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665755 4967 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665778 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665799 4967 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665820 4967 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665841 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665862 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665909 4967 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665935 4967 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665959 4967 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.665984 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.666014 4967 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.666033 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: 
\"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.666055 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.666076 4967 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.666096 4967 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.666114 4967 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.666132 4967 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.666153 4967 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.662059 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660354 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660677 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.660879 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661522 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661919 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.662005 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.661901 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: E1121 15:35:31.663915 4967 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 15:35:31 crc kubenswrapper[4967]: E1121 15:35:31.666535 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:32.16649229 +0000 UTC m=+20.425013338 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 15:35:31 crc kubenswrapper[4967]: E1121 15:35:31.664013 4967 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 15:35:31 crc kubenswrapper[4967]: E1121 15:35:31.666631 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:32.166612654 +0000 UTC m=+20.425133842 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.673111 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.675624 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.676856 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.677407 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.677496 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.677574 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.677946 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.677962 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.678111 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.678142 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.678217 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.678284 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.678337 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.678529 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.678543 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). 
InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.679353 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.681059 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.681648 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.682198 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.682343 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.683225 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.683559 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.683527 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: E1121 15:35:31.683927 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 15:35:31 crc kubenswrapper[4967]: E1121 15:35:31.683962 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 15:35:31 crc kubenswrapper[4967]: E1121 15:35:31.683983 4967 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.684023 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: E1121 15:35:31.684077 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:32.184047391 +0000 UTC m=+20.442568409 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:31 crc kubenswrapper[4967]: E1121 15:35:31.684196 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 15:35:31 crc kubenswrapper[4967]: E1121 15:35:31.684237 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 15:35:31 crc kubenswrapper[4967]: E1121 15:35:31.684259 4967 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:31 crc kubenswrapper[4967]: E1121 15:35:31.684364 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:32.184332909 +0000 UTC m=+20.442854127 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.684483 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.684725 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.684989 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.686437 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.686537 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.686891 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.687077 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.687395 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.687709 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.687717 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.688337 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.689767 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.692513 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.693820 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.699990 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.700588 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.700629 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.700685 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.701050 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.701219 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.701385 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.701629 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.701637 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.701712 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.701746 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.701913 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.702214 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.702195 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.702509 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.702581 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). 
InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.702779 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.703408 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.703481 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.703524 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.703567 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.703869 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.703952 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.703953 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). 
InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.704009 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.704404 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.704424 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.704475 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.704519 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.704754 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.727928 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.735239 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767284 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767414 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767462 4967 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767479 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767493 4967 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767507 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767521 4967 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767535 4967 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767548 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767561 4967 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767574 
4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767587 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767600 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767613 4967 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767618 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767626 4967 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767665 4967 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767954 4967 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767971 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767982 4967 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.767993 4967 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768004 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768013 4967 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 21 
15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768023 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768033 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768042 4967 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768051 4967 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768061 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768070 4967 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768080 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768089 4967 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768099 4967 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768108 4967 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768118 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768127 4967 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768137 4967 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768146 4967 reconciler_common.go:293] "Volume 
detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768156 4967 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768168 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768178 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768197 4967 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768209 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768220 4967 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768230 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768241 4967 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768253 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768264 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768274 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768284 4967 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768294 4967 
reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768304 4967 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.768330 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769502 4967 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769542 4967 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769553 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769565 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769588 4967 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769597 4967 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769607 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769620 4967 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769629 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769640 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 
15:35:31.769651 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769664 4967 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769674 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769683 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769864 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769878 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769889 4967 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769898 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769934 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769945 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769955 4967 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769963 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.769976 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.777816 4967 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.778178 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.787476 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 15:35:31 crc kubenswrapper[4967]: I1121 15:35:31.796714 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 15:35:31 crc kubenswrapper[4967]: W1121 15:35:31.801550 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-4fefd6464c5e792f9bc1a84f736dc727a7929cfb244138ae2fa783f3c655d4a9 WatchSource:0}: Error finding container 4fefd6464c5e792f9bc1a84f736dc727a7929cfb244138ae2fa783f3c655d4a9: Status 404 returned error can't find the container with id 4fefd6464c5e792f9bc1a84f736dc727a7929cfb244138ae2fa783f3c655d4a9 Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.173499 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.173611 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.173640 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:32 crc kubenswrapper[4967]: E1121 15:35:32.173789 4967 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 15:35:32 crc kubenswrapper[4967]: E1121 15:35:32.173867 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:33.173846076 +0000 UTC m=+21.432367084 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 15:35:32 crc kubenswrapper[4967]: E1121 15:35:32.174187 4967 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 15:35:32 crc kubenswrapper[4967]: E1121 15:35:32.174272 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:35:33.174229307 +0000 UTC m=+21.432750415 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:35:32 crc kubenswrapper[4967]: E1121 15:35:32.174360 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:33.174348 +0000 UTC m=+21.432869178 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.274232 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.274286 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:35:32 crc kubenswrapper[4967]: E1121 15:35:32.274512 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 15:35:32 crc kubenswrapper[4967]: E1121 15:35:32.274535 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 15:35:32 crc kubenswrapper[4967]: E1121 15:35:32.274550 4967 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:32 crc kubenswrapper[4967]: E1121 15:35:32.274576 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 15:35:32 crc kubenswrapper[4967]: E1121 15:35:32.274629 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 15:35:32 crc kubenswrapper[4967]: E1121 15:35:32.274675 4967 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:32 crc kubenswrapper[4967]: E1121 15:35:32.274628 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:33.274606906 +0000 UTC m=+21.533127924 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:32 crc kubenswrapper[4967]: E1121 15:35:32.274880 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:33.274803681 +0000 UTC m=+21.533324729 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.275199 4967 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.275277 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.419223 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.420119 4967 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.420255 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.423867 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.433358 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.447722 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.459419 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.472813 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.485382 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.499893 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.513137 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.522121 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.528045 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.535691 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:35:32 crc kubenswrapper[4967]: E1121 15:35:32.535815 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.544864 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.546085 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.548662 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.549854 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.550107 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.551349 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.552532 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.554757 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.555629 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.557494 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.558851 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.561059 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.562690 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.563773 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.564593 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.565294 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.566101 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.566940 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.567547 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.568366 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.568493 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.569189 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.569855 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.571591 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.572716 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.575477 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.576894 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.579283 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.580906 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.581970 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.583909 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" 
path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.584920 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.585731 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.586114 4967 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.586253 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.588131 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 
15:35:32.589133 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.589927 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.591509 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.592253 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.593373 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.594024 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.595147 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.595650 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.596695 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.597729 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.598454 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.599299 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.599923 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.600897 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.601025 4967 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.601689 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.602188 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.603021 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.603544 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.604575 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.605175 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.605728 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.621078 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":
\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a0cd640a05a49878fe715757235bf013c68a378811d99cc08c97074c5912c6a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:16Z\\\",\\\"message\\\":\\\"W1121 15:35:15.620629 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 15:35:15.620959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763739315 cert, and key in /tmp/serving-cert-3889716643/serving-signer.crt, /tmp/serving-cert-3889716643/serving-signer.key\\\\nI1121 15:35:16.183861 1 observer_polling.go:159] Starting file observer\\\\nW1121 15:35:16.189922 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 15:35:16.190152 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:16.191994 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3889716643/tls.crt::/tmp/serving-cert-3889716643/tls.key\\\\\\\"\\\\nF1121 15:35:16.550088 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection 
refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.636094 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.654488 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.655803 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.658279 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.658802 4967 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d" exitCode=255 Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.658920 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d"} Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.659031 4967 scope.go:117] "RemoveContainer" containerID="4a0cd640a05a49878fe715757235bf013c68a378811d99cc08c97074c5912c6a" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.660881 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"8edf4704ca01a8206f75532cb2cef3fd6f9c43457c90218107cdba4b06758371"} Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.663865 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b"} Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.664371 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f"} Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.664567 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"4fefd6464c5e792f9bc1a84f736dc727a7929cfb244138ae2fa783f3c655d4a9"} Nov 21 15:35:32 crc 
kubenswrapper[4967]: I1121 15:35:32.666979 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc"} Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.667085 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"e1a0cfef8a02389f97e7231f5989a696b481f084b465316fe403ac7271ba5e97"} Nov 21 15:35:32 crc kubenswrapper[4967]: E1121 15:35:32.668993 4967 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-apiserver-crc\" already exists" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.669260 4967 scope.go:117] "RemoveContainer" containerID="f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d" Nov 21 15:35:32 crc kubenswrapper[4967]: E1121 15:35:32.669618 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.679773 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.694464 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.712560 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.742383 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.765401 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T1
5:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.786932 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.803044 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.822161 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a0cd640a05a49878fe715757235bf013c68a378811d99cc08c97074c5912c6a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:16Z\\\",\\\"message\\\":\\\"W1121 15:35:15.620629 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 
15:35:15.620959 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763739315 cert, and key in /tmp/serving-cert-3889716643/serving-signer.crt, /tmp/serving-cert-3889716643/serving-signer.key\\\\nI1121 15:35:16.183861 1 observer_polling.go:159] Starting file observer\\\\nW1121 15:35:16.189922 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 15:35:16.190152 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:16.191994 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3889716643/tls.crt::/tmp/serving-cert-3889716643/tls.key\\\\\\\"\\\\nF1121 15:35:16.550088 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' 
detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.838718 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.853894 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:32 crc kubenswrapper[4967]: I1121 15:35:32.867840 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:33 crc kubenswrapper[4967]: I1121 15:35:33.184412 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:35:33 crc kubenswrapper[4967]: E1121 15:35:33.184868 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:35:35.184841326 +0000 UTC m=+23.443362334 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:35:33 crc kubenswrapper[4967]: I1121 15:35:33.185045 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:33 crc kubenswrapper[4967]: I1121 15:35:33.185128 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:33 crc kubenswrapper[4967]: E1121 15:35:33.185552 4967 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 15:35:33 crc kubenswrapper[4967]: E1121 15:35:33.185657 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:35.185648678 +0000 UTC m=+23.444169686 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 15:35:33 crc kubenswrapper[4967]: E1121 15:35:33.186117 4967 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 15:35:33 crc kubenswrapper[4967]: E1121 15:35:33.186212 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:35.186203353 +0000 UTC m=+23.444724361 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 15:35:33 crc kubenswrapper[4967]: I1121 15:35:33.286677 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:35:33 crc kubenswrapper[4967]: I1121 15:35:33.287104 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:35:33 crc kubenswrapper[4967]: E1121 15:35:33.287159 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 15:35:33 crc kubenswrapper[4967]: E1121 15:35:33.287401 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 15:35:33 crc kubenswrapper[4967]: E1121 15:35:33.287537 4967 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:33 crc kubenswrapper[4967]: E1121 15:35:33.287695 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:35.287667352 +0000 UTC m=+23.546188380 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:33 crc kubenswrapper[4967]: E1121 15:35:33.287277 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 15:35:33 crc kubenswrapper[4967]: E1121 15:35:33.288379 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 15:35:33 crc kubenswrapper[4967]: E1121 15:35:33.288483 4967 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:33 crc kubenswrapper[4967]: E1121 15:35:33.288620 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:35.288602078 +0000 UTC m=+23.547123106 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:33 crc kubenswrapper[4967]: I1121 15:35:33.535573 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:35:33 crc kubenswrapper[4967]: I1121 15:35:33.535655 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:33 crc kubenswrapper[4967]: E1121 15:35:33.536532 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:35:33 crc kubenswrapper[4967]: E1121 15:35:33.536589 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:35:33 crc kubenswrapper[4967]: I1121 15:35:33.671129 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 21 15:35:33 crc kubenswrapper[4967]: I1121 15:35:33.674541 4967 scope.go:117] "RemoveContainer" containerID="f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d" Nov 21 15:35:33 crc kubenswrapper[4967]: E1121 15:35:33.674735 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 21 15:35:33 crc kubenswrapper[4967]: I1121 15:35:33.689992 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:33Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:33 crc kubenswrapper[4967]: I1121 15:35:33.704265 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:33Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:33 crc kubenswrapper[4967]: I1121 15:35:33.720333 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:33Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:33 crc kubenswrapper[4967]: I1121 15:35:33.734506 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:33Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:33 crc kubenswrapper[4967]: I1121 15:35:33.749468 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:33Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:33 crc kubenswrapper[4967]: I1121 15:35:33.762807 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:33Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:33 crc kubenswrapper[4967]: I1121 15:35:33.778774 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:33Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.535762 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:35:34 crc kubenswrapper[4967]: E1121 15:35:34.536017 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.679684 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949"} Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.699091 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.718699 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.738530 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.754010 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.770250 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.790996 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.805577 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.810464 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.814265 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 15:35:34 crc kubenswrapper[4967]: 
I1121 15:35:34.821611 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.825930 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.841916 4967 status_manager.go:875] "Failed to update status 
for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.858798 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.876619 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.892472 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.909885 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.924613 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.938563 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.951897 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.967292 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:34 crc kubenswrapper[4967]: I1121 15:35:34.997252 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:35 crc kubenswrapper[4967]: I1121 15:35:35.026779 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:35Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:35 crc kubenswrapper[4967]: I1121 15:35:35.041154 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:35Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:35 crc kubenswrapper[4967]: I1121 15:35:35.055453 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:35Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:35 crc kubenswrapper[4967]: I1121 15:35:35.071282 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:35Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:35 crc kubenswrapper[4967]: I1121 15:35:35.203574 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:35:35 crc kubenswrapper[4967]: I1121 15:35:35.203710 4967 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:35 crc kubenswrapper[4967]: I1121 15:35:35.203746 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:35 crc kubenswrapper[4967]: E1121 15:35:35.203821 4967 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 15:35:35 crc kubenswrapper[4967]: E1121 15:35:35.203855 4967 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 15:35:35 crc kubenswrapper[4967]: E1121 15:35:35.203876 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:35:39.203840373 +0000 UTC m=+27.462361401 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:35:35 crc kubenswrapper[4967]: E1121 15:35:35.203932 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:39.203918055 +0000 UTC m=+27.462439083 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 15:35:35 crc kubenswrapper[4967]: E1121 15:35:35.203957 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:39.203942526 +0000 UTC m=+27.462463554 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 15:35:35 crc kubenswrapper[4967]: I1121 15:35:35.304810 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:35:35 crc kubenswrapper[4967]: I1121 15:35:35.305221 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:35:35 crc kubenswrapper[4967]: E1121 15:35:35.305129 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 15:35:35 crc kubenswrapper[4967]: E1121 15:35:35.305443 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 15:35:35 crc kubenswrapper[4967]: E1121 15:35:35.305517 4967 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:35 crc kubenswrapper[4967]: E1121 15:35:35.305664 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:39.305637951 +0000 UTC m=+27.564158959 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:35 crc kubenswrapper[4967]: E1121 15:35:35.305446 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 15:35:35 crc kubenswrapper[4967]: E1121 15:35:35.305824 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 15:35:35 crc kubenswrapper[4967]: E1121 15:35:35.305880 4967 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:35 crc kubenswrapper[4967]: E1121 15:35:35.305970 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:39.30595759 +0000 UTC m=+27.564478598 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:35 crc kubenswrapper[4967]: I1121 15:35:35.536218 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:35:35 crc kubenswrapper[4967]: I1121 15:35:35.536281 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:35 crc kubenswrapper[4967]: E1121 15:35:35.537624 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:35:35 crc kubenswrapper[4967]: E1121 15:35:35.537513 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.365101 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-8srmv"] Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.366011 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-8srmv" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.366412 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-c8qfd"] Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.366936 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-c8qfd" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.375143 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.375483 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.375994 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.376185 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.376690 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.377024 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.389329 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.400558 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:36Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.416097 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/62aa85b6-33c6-4631-8877-e4d4f4f8bb16-serviceca\") pod \"node-ca-c8qfd\" (UID: \"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\") " pod="openshift-image-registry/node-ca-c8qfd" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.416175 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktz85\" (UniqueName: \"kubernetes.io/projected/9af74498-5bb0-49a3-bf13-2ea73a127539-kube-api-access-ktz85\") pod \"node-resolver-8srmv\" (UID: \"9af74498-5bb0-49a3-bf13-2ea73a127539\") " pod="openshift-dns/node-resolver-8srmv" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.416270 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/62aa85b6-33c6-4631-8877-e4d4f4f8bb16-host\") pod \"node-ca-c8qfd\" (UID: \"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\") " pod="openshift-image-registry/node-ca-c8qfd" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.416298 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r49vd\" (UniqueName: \"kubernetes.io/projected/62aa85b6-33c6-4631-8877-e4d4f4f8bb16-kube-api-access-r49vd\") pod \"node-ca-c8qfd\" (UID: \"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\") " pod="openshift-image-registry/node-ca-c8qfd" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.416456 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/9af74498-5bb0-49a3-bf13-2ea73a127539-hosts-file\") pod \"node-resolver-8srmv\" (UID: \"9af74498-5bb0-49a3-bf13-2ea73a127539\") " pod="openshift-dns/node-resolver-8srmv" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.418738 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:36Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.439825 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:36Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.465057 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:36Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.489010 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:36Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.515724 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:36Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.516960 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/62aa85b6-33c6-4631-8877-e4d4f4f8bb16-serviceca\") pod \"node-ca-c8qfd\" (UID: \"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\") " pod="openshift-image-registry/node-ca-c8qfd" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.517007 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktz85\" (UniqueName: \"kubernetes.io/projected/9af74498-5bb0-49a3-bf13-2ea73a127539-kube-api-access-ktz85\") pod \"node-resolver-8srmv\" (UID: \"9af74498-5bb0-49a3-bf13-2ea73a127539\") " pod="openshift-dns/node-resolver-8srmv" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.517025 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/62aa85b6-33c6-4631-8877-e4d4f4f8bb16-host\") pod \"node-ca-c8qfd\" 
(UID: \"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\") " pod="openshift-image-registry/node-ca-c8qfd" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.517042 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r49vd\" (UniqueName: \"kubernetes.io/projected/62aa85b6-33c6-4631-8877-e4d4f4f8bb16-kube-api-access-r49vd\") pod \"node-ca-c8qfd\" (UID: \"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\") " pod="openshift-image-registry/node-ca-c8qfd" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.517063 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/9af74498-5bb0-49a3-bf13-2ea73a127539-hosts-file\") pod \"node-resolver-8srmv\" (UID: \"9af74498-5bb0-49a3-bf13-2ea73a127539\") " pod="openshift-dns/node-resolver-8srmv" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.517146 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/9af74498-5bb0-49a3-bf13-2ea73a127539-hosts-file\") pod \"node-resolver-8srmv\" (UID: \"9af74498-5bb0-49a3-bf13-2ea73a127539\") " pod="openshift-dns/node-resolver-8srmv" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.517205 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/62aa85b6-33c6-4631-8877-e4d4f4f8bb16-host\") pod \"node-ca-c8qfd\" (UID: \"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\") " pod="openshift-image-registry/node-ca-c8qfd" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.519455 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/62aa85b6-33c6-4631-8877-e4d4f4f8bb16-serviceca\") pod \"node-ca-c8qfd\" (UID: \"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\") " pod="openshift-image-registry/node-ca-c8qfd" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.536017 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:35:36 crc kubenswrapper[4967]: E1121 15:35:36.536161 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.541269 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r49vd\" (UniqueName: \"kubernetes.io/projected/62aa85b6-33c6-4631-8877-e4d4f4f8bb16-kube-api-access-r49vd\") pod \"node-ca-c8qfd\" (UID: \"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\") " pod="openshift-image-registry/node-ca-c8qfd" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.552025 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktz85\" (UniqueName: \"kubernetes.io/projected/9af74498-5bb0-49a3-bf13-2ea73a127539-kube-api-access-ktz85\") pod \"node-resolver-8srmv\" (UID: \"9af74498-5bb0-49a3-bf13-2ea73a127539\") " pod="openshift-dns/node-resolver-8srmv" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.553808 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:36Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.613588 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:36Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.636749 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:36Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.657721 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:36Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.677534 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:36Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.680785 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-8srmv" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.689656 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-c8qfd" Nov 21 15:35:36 crc kubenswrapper[4967]: W1121 15:35:36.694819 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9af74498_5bb0_49a3_bf13_2ea73a127539.slice/crio-f3880684d938755bec08c19d42317ca00a1767afa62abee8fdaf5788f60adf16 WatchSource:0}: Error finding container f3880684d938755bec08c19d42317ca00a1767afa62abee8fdaf5788f60adf16: Status 404 returned error can't find the container with id f3880684d938755bec08c19d42317ca00a1767afa62abee8fdaf5788f60adf16 Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.700782 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:36Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:36 crc kubenswrapper[4967]: W1121 15:35:36.710114 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod62aa85b6_33c6_4631_8877_e4d4f4f8bb16.slice/crio-e9b5c905274201616c054f0be66cb61eec515e4f0623c9f1784d1517d0866e5d WatchSource:0}: Error finding container e9b5c905274201616c054f0be66cb61eec515e4f0623c9f1784d1517d0866e5d: Status 404 returned error can't find the container with id e9b5c905274201616c054f0be66cb61eec515e4f0623c9f1784d1517d0866e5d Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.728823 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:36Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.746454 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:36Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.765598 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:36Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.783816 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:36Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.795601 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:36Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.811843 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-de
v/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:36Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:36 crc kubenswrapper[4967]: I1121 15:35:36.824735 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:36Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.297448 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-j4dcx"] Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.297872 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-lrth2"] Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.298094 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.298150 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.298104 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-jpln7"] Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.299166 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-zm492"] Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.299430 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-jpln7" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.300387 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.301303 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.301417 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.302366 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.302806 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.302902 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.303078 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.303185 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.303200 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.303269 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.303279 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 21 15:35:37 crc 
kubenswrapper[4967]: I1121 15:35:37.303456 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.303673 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.303838 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.303880 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.303898 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.304036 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.304052 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.304129 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.304192 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.326054 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.344992 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.361527 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\
":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.377024 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.401472 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.412726 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.441419 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-hostroot\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.441472 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-multus-conf-dir\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.441501 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/301ed826-105f-43b3-b553-38186c8cc1be-tuning-conf-dir\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.441524 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-run-openvswitch\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.441542 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/629a5f41-3cd8-4518-a833-2832f4ebe55a-cni-binary-copy\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.441562 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/629a5f41-3cd8-4518-a833-2832f4ebe55a-multus-daemon-config\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.441584 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-run-netns\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.441766 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-cni-netd\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.441921 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-system-cni-dir\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.441953 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-host-run-k8s-cni-cncf-io\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.441989 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/301ed826-105f-43b3-b553-38186c8cc1be-cnibin\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442071 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-run-ovn-kubernetes\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442119 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442145 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-host-run-netns\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442164 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jv5wt\" (UniqueName: \"kubernetes.io/projected/629a5f41-3cd8-4518-a833-2832f4ebe55a-kube-api-access-jv5wt\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442182 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ljns\" (UniqueName: \"kubernetes.io/projected/8f12a156-8db0-49be-a048-e7c4988f9cd0-kube-api-access-7ljns\") pod \"machine-config-daemon-lrth2\" (UID: \"8f12a156-8db0-49be-a048-e7c4988f9cd0\") " pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442200 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/301ed826-105f-43b3-b553-38186c8cc1be-cni-binary-copy\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " 
pod="openshift-multus/multus-additional-cni-plugins-jpln7" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442226 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-multus-socket-dir-parent\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442247 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/301ed826-105f-43b3-b553-38186c8cc1be-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442340 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-env-overrides\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442411 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-ovn-node-metrics-cert\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442441 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-etc-kubernetes\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442468 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-systemd-units\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442495 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-run-ovn\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442522 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jssfh\" (UniqueName: \"kubernetes.io/projected/301ed826-105f-43b3-b553-38186c8cc1be-kube-api-access-jssfh\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442564 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-var-lib-openvswitch\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442588 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-node-log\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442633 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-host-var-lib-cni-multus\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442690 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvcsc\" (UniqueName: \"kubernetes.io/projected/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-kube-api-access-wvcsc\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442715 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-multus-cni-dir\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442777 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-host-run-multus-certs\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442799 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-kubelet\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442815 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-os-release\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442832 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-slash\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442849 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" 
(UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-etc-openvswitch\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442870 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-ovnkube-script-lib\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442890 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8f12a156-8db0-49be-a048-e7c4988f9cd0-mcd-auth-proxy-config\") pod \"machine-config-daemon-lrth2\" (UID: \"8f12a156-8db0-49be-a048-e7c4988f9cd0\") " pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442910 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-run-systemd\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442927 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/301ed826-105f-43b3-b553-38186c8cc1be-system-cni-dir\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442947 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/8f12a156-8db0-49be-a048-e7c4988f9cd0-rootfs\") pod \"machine-config-daemon-lrth2\" (UID: \"8f12a156-8db0-49be-a048-e7c4988f9cd0\") " pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442969 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-log-socket\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.442988 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-cni-bin\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.443006 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-ovnkube-config\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: 
I1121 15:35:37.443049 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-cnibin\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.443067 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-host-var-lib-cni-bin\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.443087 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-host-var-lib-kubelet\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.443106 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8f12a156-8db0-49be-a048-e7c4988f9cd0-proxy-tls\") pod \"machine-config-daemon-lrth2\" (UID: \"8f12a156-8db0-49be-a048-e7c4988f9cd0\") " pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.443128 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/301ed826-105f-43b3-b553-38186c8cc1be-os-release\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.456202 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.471701 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.488980 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.503751 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.516481 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.528889 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.536066 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.536111 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:35:37 crc kubenswrapper[4967]: E1121 15:35:37.536240 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:35:37 crc kubenswrapper[4967]: E1121 15:35:37.536381 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.543792 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-slash\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.543846 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-etc-openvswitch\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.543869 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-ovnkube-script-lib\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.543934 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8f12a156-8db0-49be-a048-e7c4988f9cd0-mcd-auth-proxy-config\") pod \"machine-config-daemon-lrth2\" (UID: \"8f12a156-8db0-49be-a048-e7c4988f9cd0\") " pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.543973 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-etc-openvswitch\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.543989 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-run-systemd\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544015 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/301ed826-105f-43b3-b553-38186c8cc1be-system-cni-dir\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544021 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-slash\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544068 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/301ed826-105f-43b3-b553-38186c8cc1be-system-cni-dir\") pod \"multus-additional-cni-plugins-jpln7\" (UID: 
\"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544041 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/8f12a156-8db0-49be-a048-e7c4988f9cd0-rootfs\") pod \"machine-config-daemon-lrth2\" (UID: \"8f12a156-8db0-49be-a048-e7c4988f9cd0\") " pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544095 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/8f12a156-8db0-49be-a048-e7c4988f9cd0-rootfs\") pod \"machine-config-daemon-lrth2\" (UID: \"8f12a156-8db0-49be-a048-e7c4988f9cd0\") " pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544116 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-cnibin\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544038 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-run-systemd\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544144 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-host-var-lib-cni-bin\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544170 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-host-var-lib-cni-bin\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544173 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-host-var-lib-kubelet\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544177 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-cnibin\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544196 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-log-socket\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544227 4967 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-host-var-lib-kubelet\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544235 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-cni-bin\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544270 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-cni-bin\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544287 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-ovnkube-config\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544335 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/301ed826-105f-43b3-b553-38186c8cc1be-os-release\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544360 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8f12a156-8db0-49be-a048-e7c4988f9cd0-proxy-tls\") pod \"machine-config-daemon-lrth2\" (UID: \"8f12a156-8db0-49be-a048-e7c4988f9cd0\") " pod="openshift-machine-config-operator/machine-config-daemon-lrth2"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544382 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-hostroot\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544402 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-multus-conf-dir\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544430 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/301ed826-105f-43b3-b553-38186c8cc1be-tuning-conf-dir\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544461 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/629a5f41-3cd8-4518-a833-2832f4ebe55a-multus-daemon-config\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544488 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-run-openvswitch\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544512 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/629a5f41-3cd8-4518-a833-2832f4ebe55a-cni-binary-copy\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544535 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-system-cni-dir\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544559 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-host-run-k8s-cni-cncf-io\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544585 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/301ed826-105f-43b3-b553-38186c8cc1be-cnibin\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544609 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-run-netns\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544634 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-cni-netd\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544662 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jv5wt\" (UniqueName: \"kubernetes.io/projected/629a5f41-3cd8-4518-a833-2832f4ebe55a-kube-api-access-jv5wt\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544691 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ljns\" (UniqueName: \"kubernetes.io/projected/8f12a156-8db0-49be-a048-e7c4988f9cd0-kube-api-access-7ljns\") pod \"machine-config-daemon-lrth2\" (UID: \"8f12a156-8db0-49be-a048-e7c4988f9cd0\") " pod="openshift-machine-config-operator/machine-config-daemon-lrth2"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544713 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/301ed826-105f-43b3-b553-38186c8cc1be-cni-binary-copy\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544746 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-run-ovn-kubernetes\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544770 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544792 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-host-run-netns\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544814 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-multus-socket-dir-parent\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544835 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/301ed826-105f-43b3-b553-38186c8cc1be-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544868 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-env-overrides\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544892 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-ovn-node-metrics-cert\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544913 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-etc-kubernetes\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544936 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-systemd-units\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544958 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-run-ovn\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544979 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jssfh\" (UniqueName: \"kubernetes.io/projected/301ed826-105f-43b3-b553-38186c8cc1be-kube-api-access-jssfh\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545000 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-host-var-lib-cni-multus\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545004 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-ovnkube-script-lib\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545014 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-ovnkube-config\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545022 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-var-lib-openvswitch\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545036 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-cni-netd\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.544263 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-log-socket\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545069 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-node-log\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545003 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8f12a156-8db0-49be-a048-e7c4988f9cd0-mcd-auth-proxy-config\") pod \"machine-config-daemon-lrth2\" (UID: \"8f12a156-8db0-49be-a048-e7c4988f9cd0\") " pod="openshift-machine-config-operator/machine-config-daemon-lrth2"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545122 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-run-ovn\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545128 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-run-ovn-kubernetes\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545135 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-var-lib-openvswitch\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545175 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545208 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-host-run-netns\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545248 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-multus-socket-dir-parent\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545382 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-system-cni-dir\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545382 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-run-openvswitch\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545466 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-host-var-lib-cni-multus\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545694 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/629a5f41-3cd8-4518-a833-2832f4ebe55a-multus-daemon-config\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545729 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-host-run-k8s-cni-cncf-io\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545728 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/301ed826-105f-43b3-b553-38186c8cc1be-os-release\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545754 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/301ed826-105f-43b3-b553-38186c8cc1be-cnibin\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545823 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/301ed826-105f-43b3-b553-38186c8cc1be-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545875 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-run-netns\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545895 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-node-log\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545911 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-multus-conf-dir\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545915 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-multus-cni-dir\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545929 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-etc-kubernetes\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.545942 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-host-run-multus-certs\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.546097 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-multus-cni-dir\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.546128 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/629a5f41-3cd8-4518-a833-2832f4ebe55a-cni-binary-copy\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.546130 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-systemd-units\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.546170 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvcsc\" (UniqueName: \"kubernetes.io/projected/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-kube-api-access-wvcsc\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.546135 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-host-run-multus-certs\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.546190 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-hostroot\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.546235 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-os-release\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.546333 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/629a5f41-3cd8-4518-a833-2832f4ebe55a-os-release\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.546368 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-kubelet\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.546419 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-kubelet\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.546464 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-env-overrides\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.546740 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/301ed826-105f-43b3-b553-38186c8cc1be-cni-binary-copy\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.546926 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/301ed826-105f-43b3-b553-38186c8cc1be-tuning-conf-dir\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.549961 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8f12a156-8db0-49be-a048-e7c4988f9cd0-proxy-tls\") pod \"machine-config-daemon-lrth2\" (UID: \"8f12a156-8db0-49be-a048-e7c4988f9cd0\") " pod="openshift-machine-config-operator/machine-config-daemon-lrth2"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.549986 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-ovn-node-metrics-cert\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.552841 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.562616 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvcsc\" (UniqueName: \"kubernetes.io/projected/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-kube-api-access-wvcsc\") pod \"ovnkube-node-zm492\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.563418 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jssfh\" (UniqueName: \"kubernetes.io/projected/301ed826-105f-43b3-b553-38186c8cc1be-kube-api-access-jssfh\") pod \"multus-additional-cni-plugins-jpln7\" (UID: \"301ed826-105f-43b3-b553-38186c8cc1be\") " pod="openshift-multus/multus-additional-cni-plugins-jpln7"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.568031 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ljns\" (UniqueName: \"kubernetes.io/projected/8f12a156-8db0-49be-a048-e7c4988f9cd0-kube-api-access-7ljns\") pod \"machine-config-daemon-lrth2\" (UID: \"8f12a156-8db0-49be-a048-e7c4988f9cd0\") " pod="openshift-machine-config-operator/machine-config-daemon-lrth2"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.571294 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jv5wt\" (UniqueName: \"kubernetes.io/projected/629a5f41-3cd8-4518-a833-2832f4ebe55a-kube-api-access-jv5wt\") pod \"multus-j4dcx\" (UID: \"629a5f41-3cd8-4518-a833-2832f4ebe55a\") " pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.578287 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.599770 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.615160 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-j4dcx"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.616298 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.630890 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-lrth2"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.636633 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.639855 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-jpln7"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.645978 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.655718 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z"
Nov 21 15:35:37 crc kubenswrapper[4967]: W1121 15:35:37.662067 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeeb9277d_9a26_4665_a01c_9ed1c379e8dd.slice/crio-fa379cf57929b4b3e288f4fa9571c203fd83769bbd890c72ac259ac7dcbbfdf2 WatchSource:0}: Error finding container fa379cf57929b4b3e288f4fa9571c203fd83769bbd890c72ac259ac7dcbbfdf2: Status 404 returned error can't find the container with id fa379cf57929b4b3e288f4fa9571c203fd83769bbd890c72ac259ac7dcbbfdf2
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.671827 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z"
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.689741 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"f0442c3e35a1728cda05047d81422588e39820c7ee42013c5758c200beb526da"}
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.691042 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j4dcx" event={"ID":"629a5f41-3cd8-4518-a833-2832f4ebe55a","Type":"ContainerStarted","Data":"c1d3e5bd84825c7786e5e2ceff543c6ecbf19f4c3d9dff11eec79db00873b22f"}
Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.694554 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\
\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\"
,\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.696255 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerStarted","Data":"fa379cf57929b4b3e288f4fa9571c203fd83769bbd890c72ac259ac7dcbbfdf2"} Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.698925 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-c8qfd" event={"ID":"62aa85b6-33c6-4631-8877-e4d4f4f8bb16","Type":"ContainerStarted","Data":"b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494"} Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.698984 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-c8qfd" event={"ID":"62aa85b6-33c6-4631-8877-e4d4f4f8bb16","Type":"ContainerStarted","Data":"e9b5c905274201616c054f0be66cb61eec515e4f0623c9f1784d1517d0866e5d"} Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.701894 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" event={"ID":"301ed826-105f-43b3-b553-38186c8cc1be","Type":"ContainerStarted","Data":"c4849139b4ae59d5ddde0ca3a4d3fbad00ebd23b75598777a8bb7dfb1e13d0ff"} Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.703142 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-8srmv" event={"ID":"9af74498-5bb0-49a3-bf13-2ea73a127539","Type":"ContainerStarted","Data":"e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26"} Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.703182 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-8srmv" event={"ID":"9af74498-5bb0-49a3-bf13-2ea73a127539","Type":"ContainerStarted","Data":"f3880684d938755bec08c19d42317ca00a1767afa62abee8fdaf5788f60adf16"} Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.713149 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.726926 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.737684 4967 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.744577 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.744653 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.744675 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.744892 4967 kubelet_node_status.go:76] "Attempting to register node" 
node="crc" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.753455 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/s
tatic-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.755616 4967 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.755856 4967 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.757209 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.757683 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.757702 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.757727 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.757740 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:37Z","lastTransitionTime":"2025-11-21T15:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.777885 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: E1121 15:35:37.779039 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"e
dc01b1f-a566-4bc8-990a-79924b529553\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.784117 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.784141 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.784150 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.784168 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.784180 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:37Z","lastTransitionTime":"2025-11-21T15:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.790600 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: E1121 15:35:37.797996 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.803288 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.803325 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.803336 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.803351 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.803361 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:37Z","lastTransitionTime":"2025-11-21T15:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.806221 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: E1121 15:35:37.816043 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 
2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.821345 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.821499 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.821578 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.821678 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.821750 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:37Z","lastTransitionTime":"2025-11-21T15:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.827111 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: E1121 15:35:37.838381 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.841797 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.842879 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.842900 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.842908 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.842924 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.842935 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:37Z","lastTransitionTime":"2025-11-21T15:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:37 crc kubenswrapper[4967]: E1121 15:35:37.856153 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
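The patch bodies, like the node-status one just above, are JSON that has been quoted twice on its way into this file (once into the err string, once into the logged message), which is why every quote appears as \\\". Peeling those two layers is enough to recover readable JSON from a copied fragment (the fragment below is shortened; the uid is taken from the network-check-target entry above):

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "strings"
    )

    func main() {
        // Shortened fragment in the exact escaping used by this log.
        raw := `{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"}}`
        // Inner quotes (\\\") first, then any remaining outer ones (\").
        unescaped := strings.NewReplacer(`\\\"`, `"`, `\"`, `"`).Replace(raw)
        var pretty bytes.Buffer
        if err := json.Indent(&pretty, []byte(unescaped), "", "  "); err != nil {
            fmt.Println("still not valid JSON:", err)
            return
        }
        fmt.Println(pretty.String())
    }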
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: E1121 15:35:37.856355 4967 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.860245 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" 
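The "update node status exceeds retry count" error above marks the end of one node-status sync: the kubelet attempts the patch a fixed number of times per sync (nodeStatusUpdateRetry, which is 5 in the upstream sources I'm aware of), logs "Error updating node status, will retry" for each failure, then gives up until the next sync period. In outline (a simplification for illustration, not kubelet source):

    package main

    import (
        "errors"
        "fmt"
    )

    // Illustrative stand-in for kubelet's nodeStatusUpdateRetry constant.
    const nodeStatusUpdateRetry = 5

    func updateNodeStatus(try func() error) error {
        for i := 0; i < nodeStatusUpdateRetry; i++ {
            if err := try(); err == nil {
                return nil
            }
            // Each failed attempt corresponds to an
            // "Error updating node status, will retry" entry above.
        }
        return fmt.Errorf("update node status exceeds retry count")
    }

    func main() {
        err := updateNodeStatus(func() error {
            // Stand-in for the webhook rejection: the patch never succeeds.
            return errors.New("x509: certificate has expired or is not yet valid")
        })
        fmt.Println(err) // update node status exceeds retry count
    }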
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.860598 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.860641 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.860654 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.860672 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.860683 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:37Z","lastTransitionTime":"2025-11-21T15:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.872881 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"h
ostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.889427 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.903821 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.917184 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.932177 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.946803 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.963273 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.963339 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.963348 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.963568 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.963917 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:37Z","lastTransitionTime":"2025-11-21T15:35:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.964241 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni
-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a247
3a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:37 crc kubenswrapper[4967]: I1121 15:35:37.980744 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:37Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.004200 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:38Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.019081 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-21T15:35:38Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.067387 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.067457 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.067484 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.067510 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.067527 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:38Z","lastTransitionTime":"2025-11-21T15:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.170552 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.170619 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.170632 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.170656 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.170671 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:38Z","lastTransitionTime":"2025-11-21T15:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.273561 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.273636 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.273652 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.273680 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.273696 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:38Z","lastTransitionTime":"2025-11-21T15:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.376957 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.377016 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.377032 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.377054 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.377071 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:38Z","lastTransitionTime":"2025-11-21T15:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.480573 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.480634 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.480645 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.480676 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.480689 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:38Z","lastTransitionTime":"2025-11-21T15:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.536238 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:35:38 crc kubenswrapper[4967]: E1121 15:35:38.536478 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.583699 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.583754 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.583766 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.583785 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.583798 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:38Z","lastTransitionTime":"2025-11-21T15:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.686785 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.686835 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.686848 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.686866 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.686879 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:38Z","lastTransitionTime":"2025-11-21T15:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.710896 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663"} Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.711014 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c"} Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.712704 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j4dcx" event={"ID":"629a5f41-3cd8-4518-a833-2832f4ebe55a","Type":"ContainerStarted","Data":"3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751"} Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.714565 4967 generic.go:334] "Generic (PLEG): container finished" podID="301ed826-105f-43b3-b553-38186c8cc1be" containerID="85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36" exitCode=0 Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.714629 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" event={"ID":"301ed826-105f-43b3-b553-38186c8cc1be","Type":"ContainerDied","Data":"85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36"} Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.716900 4967 generic.go:334] "Generic (PLEG): container finished" podID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerID="beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020" exitCode=0 Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.716958 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerDied","Data":"beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020"} Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.731710 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:38Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.751604 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:38Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.771654 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:38Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.789473 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:38Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.790718 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.790757 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.790771 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.790792 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.790806 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:38Z","lastTransitionTime":"2025-11-21T15:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.803648 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:38Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.828141 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:38Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.842200 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:38Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.855958 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:38Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.872919 4967 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting
\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:38Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.889497 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:38Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.895505 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.895557 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.895572 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.895596 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.895614 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:38Z","lastTransitionTime":"2025-11-21T15:35:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.906271 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:38Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.926339 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:38Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.943358 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:38Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.969686 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:38Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:38 crc kubenswrapper[4967]: I1121 15:35:38.987459 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:38Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.000409 4967 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.000479 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.000491 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.000513 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.000528 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:39Z","lastTransitionTime":"2025-11-21T15:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.011355 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc 
kubenswrapper[4967]: I1121 15:35:39.029012 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"runnin
g\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.045437 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.060494 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.075951 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.087698 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.102772 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.105688 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.105744 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.105757 4967 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.105781 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.105795 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:39Z","lastTransitionTime":"2025-11-21T15:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.124886 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z 
is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.139641 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.155601 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@s
ha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.175030 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.190501 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.202834 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.208712 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.208754 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.208764 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.208785 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.208797 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:39Z","lastTransitionTime":"2025-11-21T15:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.267641 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.267774 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.267829 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:39 crc kubenswrapper[4967]: E1121 15:35:39.267989 4967 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 15:35:39 crc kubenswrapper[4967]: E1121 15:35:39.268068 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:47.268046876 +0000 UTC m=+35.526567884 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 15:35:39 crc kubenswrapper[4967]: E1121 15:35:39.268518 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:35:47.268509279 +0000 UTC m=+35.527030277 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:35:39 crc kubenswrapper[4967]: E1121 15:35:39.268565 4967 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 15:35:39 crc kubenswrapper[4967]: E1121 15:35:39.268592 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:47.268585511 +0000 UTC m=+35.527106519 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.314007 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.314044 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.314053 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.314071 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.314081 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:39Z","lastTransitionTime":"2025-11-21T15:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.368777 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.368843 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:35:39 crc kubenswrapper[4967]: E1121 15:35:39.369004 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 15:35:39 crc kubenswrapper[4967]: E1121 15:35:39.369024 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 15:35:39 crc kubenswrapper[4967]: E1121 15:35:39.369038 4967 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:39 crc kubenswrapper[4967]: E1121 15:35:39.369101 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:47.369083504 +0000 UTC m=+35.627604512 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:39 crc kubenswrapper[4967]: E1121 15:35:39.369156 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 15:35:39 crc kubenswrapper[4967]: E1121 15:35:39.369165 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 15:35:39 crc kubenswrapper[4967]: E1121 15:35:39.369172 4967 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:39 crc kubenswrapper[4967]: E1121 15:35:39.369193 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 15:35:47.369186447 +0000 UTC m=+35.627707455 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.418339 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.418973 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.419031 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.419119 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.419183 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:39Z","lastTransitionTime":"2025-11-21T15:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.522782 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.522831 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.522845 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.522868 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.522883 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:39Z","lastTransitionTime":"2025-11-21T15:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.535753 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:35:39 crc kubenswrapper[4967]: E1121 15:35:39.535968 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.536551 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:39 crc kubenswrapper[4967]: E1121 15:35:39.536643 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.626174 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.626269 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.626284 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.626370 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.626396 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:39Z","lastTransitionTime":"2025-11-21T15:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.723489 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" event={"ID":"301ed826-105f-43b3-b553-38186c8cc1be","Type":"ContainerStarted","Data":"516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225"} Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.726782 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerStarted","Data":"159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4"} Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.726848 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerStarted","Data":"3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b"} Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.726866 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerStarted","Data":"7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381"} Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.726880 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerStarted","Data":"1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce"} Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.728247 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.728301 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.728370 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.728399 4967 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeNotReady" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.728484 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:39Z","lastTransitionTime":"2025-11-21T15:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.744744 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\
":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.760144 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.775207 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.790439 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.800983 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true
,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.818344 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:1
5Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 
genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.832607 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.832651 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.832664 4967 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.832683 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.832693 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:39Z","lastTransitionTime":"2025-11-21T15:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.838032 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.853408 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.871778 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.884498 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.901387 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.916710 4967 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\
":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.931852 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.935766 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.935808 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.935819 4967 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.935847 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.935863 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:39Z","lastTransitionTime":"2025-11-21T15:35:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:39 crc kubenswrapper[4967]: I1121 15:35:39.952223 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:39Z 
is after 2025-08-24T17:21:41Z"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.039531 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.039607 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.039625 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.039653 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.039674 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:40Z","lastTransitionTime":"2025-11-21T15:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.149245 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.149294 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.149305 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.149343 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.149364 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:40Z","lastTransitionTime":"2025-11-21T15:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.252450 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.252497 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.252507 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.252526 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.252537 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:40Z","lastTransitionTime":"2025-11-21T15:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.356289 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.356402 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.356416 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.356444 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.356459 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:40Z","lastTransitionTime":"2025-11-21T15:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.459988 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.460024 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.460038 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.460056 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.460069 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:40Z","lastTransitionTime":"2025-11-21T15:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.535629 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 21 15:35:40 crc kubenswrapper[4967]: E1121 15:35:40.535765 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.562849 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.562903 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.562916 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.562938 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.562957 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:40Z","lastTransitionTime":"2025-11-21T15:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.665975 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.666020 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.666034 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.666055 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.666066 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:40Z","lastTransitionTime":"2025-11-21T15:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.735783 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerStarted","Data":"4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75"}
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.735841 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerStarted","Data":"e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d"}
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.738412 4967 generic.go:334] "Generic (PLEG): container finished" podID="301ed826-105f-43b3-b553-38186c8cc1be" containerID="516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225" exitCode=0
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.738467 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" event={"ID":"301ed826-105f-43b3-b553-38186c8cc1be","Type":"ContainerDied","Data":"516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225"}
Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.755204 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168
.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:40Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.769248 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.769346 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.769360 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.769382 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.769398 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:40Z","lastTransitionTime":"2025-11-21T15:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.770977 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:40Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.792816 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift
-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:40Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.810706 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:40Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.823928 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:40Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.836405 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:40Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.848289 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:40Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.860451 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:40Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.872232 4967 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.872274 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.872285 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.872319 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.872330 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:40Z","lastTransitionTime":"2025-11-21T15:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.878445 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:40Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.907058 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:40Z 
is after 2025-08-24T17:21:41Z" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.922709 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:40Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.937670 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:40Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.955108 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:40Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.971560 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:40Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.976006 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.976072 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.976087 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.976108 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:40 crc kubenswrapper[4967]: I1121 15:35:40.976119 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:40Z","lastTransitionTime":"2025-11-21T15:35:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.079581 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.079640 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.079653 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.079681 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.079696 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:41Z","lastTransitionTime":"2025-11-21T15:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.183121 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.183159 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.183178 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.183208 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.183222 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:41Z","lastTransitionTime":"2025-11-21T15:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.286020 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.286062 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.286082 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.286098 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.286108 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:41Z","lastTransitionTime":"2025-11-21T15:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.388809 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.388853 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.388872 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.388898 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.388917 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:41Z","lastTransitionTime":"2025-11-21T15:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.491799 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.491842 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.491855 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.491874 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.491887 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:41Z","lastTransitionTime":"2025-11-21T15:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.535287 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:41 crc kubenswrapper[4967]: E1121 15:35:41.535550 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.535647 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:35:41 crc kubenswrapper[4967]: E1121 15:35:41.535897 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.594928 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.594987 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.595001 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.595035 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.595057 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:41Z","lastTransitionTime":"2025-11-21T15:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.699183 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.699861 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.699893 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.699916 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.699928 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:41Z","lastTransitionTime":"2025-11-21T15:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.751958 4967 generic.go:334] "Generic (PLEG): container finished" podID="301ed826-105f-43b3-b553-38186c8cc1be" containerID="c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e" exitCode=0 Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.752024 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" event={"ID":"301ed826-105f-43b3-b553-38186c8cc1be","Type":"ContainerDied","Data":"c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e"} Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.773022 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:41Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.792684 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:41Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.802979 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.803026 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.803035 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.803051 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.803063 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:41Z","lastTransitionTime":"2025-11-21T15:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.808605 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:41Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.824248 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:41Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.837412 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:41Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.853175 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:41Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.868963 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:41Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.884360 4967 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\
\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:41Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.898966 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:41Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.907156 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.907200 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.907213 4967 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.907233 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.907245 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:41Z","lastTransitionTime":"2025-11-21T15:35:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.921548 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:41Z 
is after 2025-08-24T17:21:41Z" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.936200 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:41Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.950444 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:41Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.964812 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:41Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:41 crc kubenswrapper[4967]: I1121 15:35:41.985177 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:41Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.010166 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.010215 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.010225 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.010248 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.010259 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:42Z","lastTransitionTime":"2025-11-21T15:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.112950 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.113005 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.113018 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.113038 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.113050 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:42Z","lastTransitionTime":"2025-11-21T15:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.216669 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.217141 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.217152 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.217171 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.217186 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:42Z","lastTransitionTime":"2025-11-21T15:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.275558 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.276350 4967 scope.go:117] "RemoveContainer" containerID="f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.319945 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.320001 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.320011 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.320029 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.320042 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:42Z","lastTransitionTime":"2025-11-21T15:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.421858 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.421894 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.421935 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.421951 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.421960 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:42Z","lastTransitionTime":"2025-11-21T15:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.525188 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.525241 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.525254 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.525277 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.525294 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:42Z","lastTransitionTime":"2025-11-21T15:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.536238 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:35:42 crc kubenswrapper[4967]: E1121 15:35:42.540846 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.556290 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.579706 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.593432 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.608779 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.622192 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true
,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.628409 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.628458 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.628469 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.628487 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.628500 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:42Z","lastTransitionTime":"2025-11-21T15:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.637845 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.652046 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.666045 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.681185 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.695811 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.708544 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.729163 4967 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\
\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.730765 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.730806 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.730816 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.730833 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.730853 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:42Z","lastTransitionTime":"2025-11-21T15:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.750376 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.758991 4967 generic.go:334] "Generic (PLEG): container finished" podID="301ed826-105f-43b3-b553-38186c8cc1be" 
containerID="03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482" exitCode=0 Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.759098 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" event={"ID":"301ed826-105f-43b3-b553-38186c8cc1be","Type":"ContainerDied","Data":"03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482"} Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.767871 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerStarted","Data":"2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92"} Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.770928 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.772169 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8"} Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.772920 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.777813 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z 
is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.803290 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.818092 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@s
ha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.833224 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.833257 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.833266 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.833280 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.833291 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:42Z","lastTransitionTime":"2025-11-21T15:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.834304 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.849906 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.864692 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.879814 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\
\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.900574 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.915460 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.930042 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.935208 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.935258 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.935272 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.935292 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.935325 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:42Z","lastTransitionTime":"2025-11-21T15:35:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.945743 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.963021 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.975559 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:42 crc kubenswrapper[4967]: I1121 15:35:42.989475 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.009060 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:43Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.037898 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.038298 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.038399 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.038476 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.038544 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:43Z","lastTransitionTime":"2025-11-21T15:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.141727 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.141796 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.141810 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.141839 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.141851 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:43Z","lastTransitionTime":"2025-11-21T15:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.244920 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.244969 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.244978 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.244999 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.245010 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:43Z","lastTransitionTime":"2025-11-21T15:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.348987 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.349020 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.349028 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.349044 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.349054 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:43Z","lastTransitionTime":"2025-11-21T15:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.451765 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.451814 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.451827 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.451846 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.451858 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:43Z","lastTransitionTime":"2025-11-21T15:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.535951 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.535986 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:35:43 crc kubenswrapper[4967]: E1121 15:35:43.536154 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:35:43 crc kubenswrapper[4967]: E1121 15:35:43.536268 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.555229 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.555280 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.555291 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.555323 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.555335 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:43Z","lastTransitionTime":"2025-11-21T15:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.658914 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.658962 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.658976 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.658993 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.659008 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:43Z","lastTransitionTime":"2025-11-21T15:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.761962 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.762022 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.762035 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.762060 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.762075 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:43Z","lastTransitionTime":"2025-11-21T15:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.781121 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" event={"ID":"301ed826-105f-43b3-b553-38186c8cc1be","Type":"ContainerStarted","Data":"88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545"} Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.795840 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:43Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.812183 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\
"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:43Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.831916 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:43Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.851616 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:43Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.864881 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.864918 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.864931 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.864950 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.864962 4967 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:43Z","lastTransitionTime":"2025-11-21T15:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.869875 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:43Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.891107 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:43Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.904450 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:43Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.919332 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:43Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.939291 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name
\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:43Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.955182 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:43Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.967467 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.967531 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.967545 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.967561 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.967807 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:43Z","lastTransitionTime":"2025-11-21T15:35:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.970524 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:43Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.983537 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-oper
ator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:43Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:43 crc kubenswrapper[4967]: I1121 15:35:43.998074 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:43Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.008059 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:44Z is after 2025-08-24T17:21:41Z"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.069929 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.069976 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.069990 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.070008 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.070018 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:44Z","lastTransitionTime":"2025-11-21T15:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.173110 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.173198 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.173211 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.173236 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.173248 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:44Z","lastTransitionTime":"2025-11-21T15:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.275391 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.275817 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.275828 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.275847 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.275858 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:44Z","lastTransitionTime":"2025-11-21T15:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.383371 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.383451 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.383471 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.383499 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.383514 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:44Z","lastTransitionTime":"2025-11-21T15:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.486879 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.486926 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.486938 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.486955 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.486964 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:44Z","lastTransitionTime":"2025-11-21T15:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.535684 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 21 15:35:44 crc kubenswrapper[4967]: E1121 15:35:44.535876 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.590543 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.590618 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.590643 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.590681 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.590705 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:44Z","lastTransitionTime":"2025-11-21T15:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.693284 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.694422 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.694444 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.694469 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.694486 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:44Z","lastTransitionTime":"2025-11-21T15:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.789473 4967 generic.go:334] "Generic (PLEG): container finished" podID="301ed826-105f-43b3-b553-38186c8cc1be" containerID="88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545" exitCode=0
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.789595 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" event={"ID":"301ed826-105f-43b3-b553-38186c8cc1be","Type":"ContainerDied","Data":"88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545"}
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.794299 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerStarted","Data":"766490517b82844a99521e42980fcd6b0e94eb2aee0dedb6f933747591347fdb"}
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.794638 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.796896 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.796974 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.796991 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.797011 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.797024 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:44Z","lastTransitionTime":"2025-11-21T15:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.809760 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jss
fh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:44Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.824552 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:44Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.881585 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:44Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.881783 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.902973 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.903026 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.903039 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.903058 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.903070 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:44Z","lastTransitionTime":"2025-11-21T15:35:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.908540 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:44Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.936629 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:44Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.953789 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:44Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.968461 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:44Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:44 crc kubenswrapper[4967]: I1121 15:35:44.986642 4967 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:44Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.006057 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.009110 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.009193 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.009228 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.009255 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.009272 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:45Z","lastTransitionTime":"2025-11-21T15:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.020153 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.034719 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.050382 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.066431 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.081066 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true
,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.099965 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:1
5Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 
genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.112345 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.112385 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.112397 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.112413 4967 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.112424 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:45Z","lastTransitionTime":"2025-11-21T15:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.118209 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.134153 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.148379 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.160975 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.176837 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.192517 4967 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f05228
6f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\
\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.211459 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.215770 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.215847 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.215863 4967 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.215884 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.215898 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:45Z","lastTransitionTime":"2025-11-21T15:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.237762 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://766490517b82844a99521e42980fcd6b0e94eb2a
ee0dedb6f933747591347fdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.252785 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.276524 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.293027 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.309967 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.318391 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.318441 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.318454 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.318477 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.318492 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:45Z","lastTransitionTime":"2025-11-21T15:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.323635 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.421798 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.421843 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.421852 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.421871 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.421884 4967 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:45Z","lastTransitionTime":"2025-11-21T15:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.524861 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.524907 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.524918 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.524933 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.524945 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:45Z","lastTransitionTime":"2025-11-21T15:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.535447 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:35:45 crc kubenswrapper[4967]: E1121 15:35:45.535552 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.535450 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:45 crc kubenswrapper[4967]: E1121 15:35:45.535666 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.627603 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.627656 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.627668 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.627687 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.627699 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:45Z","lastTransitionTime":"2025-11-21T15:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.730343 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.730436 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.730455 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.730485 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.730504 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:45Z","lastTransitionTime":"2025-11-21T15:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.798637 4967 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.799288 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.822862 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.832928 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.832975 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.832988 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.833008 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.833022 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:45Z","lastTransitionTime":"2025-11-21T15:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.838612 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.859069 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://766490517b82844a99521e42980fcd6b0e94eb2aee0dedb6f933747591347fdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.875296 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.887637 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.903602 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.918067 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.931415 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true
,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.936394 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.936432 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.936445 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.936462 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.936473 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:45Z","lastTransitionTime":"2025-11-21T15:35:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.949464 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jss
fh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.966064 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:45 crc kubenswrapper[4967]: I1121 15:35:45.982091 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.001874 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:45Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.018730 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:46Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.033020 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:46Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.039432 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.039494 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.039512 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.039532 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.039546 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:46Z","lastTransitionTime":"2025-11-21T15:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.048782 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:46Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.142632 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.142703 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.142715 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.142737 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.142749 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:46Z","lastTransitionTime":"2025-11-21T15:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.244922 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.244973 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.244981 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.244997 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.245007 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:46Z","lastTransitionTime":"2025-11-21T15:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.347994 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.348032 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.348044 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.348061 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.348070 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:46Z","lastTransitionTime":"2025-11-21T15:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.451555 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.451610 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.451619 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.451647 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.451658 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:46Z","lastTransitionTime":"2025-11-21T15:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.535732 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:35:46 crc kubenswrapper[4967]: E1121 15:35:46.535887 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.555860 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.555897 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.555906 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.555921 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.555932 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:46Z","lastTransitionTime":"2025-11-21T15:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.658682 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.658717 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.658725 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.658757 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.658769 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:46Z","lastTransitionTime":"2025-11-21T15:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.763112 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.763165 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.763177 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.763202 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.763216 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:46Z","lastTransitionTime":"2025-11-21T15:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.806487 4967 generic.go:334] "Generic (PLEG): container finished" podID="301ed826-105f-43b3-b553-38186c8cc1be" containerID="359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313" exitCode=0 Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.806621 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" event={"ID":"301ed826-105f-43b3-b553-38186c8cc1be","Type":"ContainerDied","Data":"359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313"} Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.806712 4967 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.827374 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:46Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.850947 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://766490517b82844a99521e42980fcd6b0e94eb2aee0dedb6f933747591347fdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:46Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.867397 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.867435 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.867448 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.867464 4967 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.867475 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:46Z","lastTransitionTime":"2025-11-21T15:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.869155 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay
.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:46Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.884293 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:46Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.899016 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:46Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.914841 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:46Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.926150 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true
,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:46Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.939215 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:1
5Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 
genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:46Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.950531 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:46Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.962635 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:46Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.970211 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.970260 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.970270 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.970291 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.970302 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:46Z","lastTransitionTime":"2025-11-21T15:35:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.976530 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:46Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:46 crc kubenswrapper[4967]: I1121 15:35:46.991122 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:46Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.005457 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:47Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.025246 4967 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a168
8df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"
/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:47Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.072973 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.073008 4967 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.073018 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.073034 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.073045 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:47Z","lastTransitionTime":"2025-11-21T15:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.175766 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.175818 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.175834 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.175853 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.175864 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:47Z","lastTransitionTime":"2025-11-21T15:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.278222 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.278256 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.278264 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.278281 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.278293 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:47Z","lastTransitionTime":"2025-11-21T15:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.360068 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.360181 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:47 crc kubenswrapper[4967]: E1121 15:35:47.360238 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:36:03.36022124 +0000 UTC m=+51.618742248 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:35:47 crc kubenswrapper[4967]: E1121 15:35:47.360263 4967 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 15:35:47 crc kubenswrapper[4967]: E1121 15:35:47.360336 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 15:36:03.360301272 +0000 UTC m=+51.618822280 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 15:35:47 crc kubenswrapper[4967]: E1121 15:35:47.360355 4967 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 15:35:47 crc kubenswrapper[4967]: E1121 15:35:47.360387 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 15:36:03.360379895 +0000 UTC m=+51.618900903 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.360271 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.380405 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.380443 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.380454 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.380471 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.380486 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:47Z","lastTransitionTime":"2025-11-21T15:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.461078 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.461150 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:35:47 crc kubenswrapper[4967]: E1121 15:35:47.461274 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 15:35:47 crc kubenswrapper[4967]: E1121 15:35:47.461301 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 15:35:47 crc kubenswrapper[4967]: E1121 15:35:47.461332 4967 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:47 crc kubenswrapper[4967]: E1121 15:35:47.461274 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 15:35:47 crc kubenswrapper[4967]: E1121 15:35:47.461366 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 15:35:47 crc kubenswrapper[4967]: E1121 15:35:47.461375 4967 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:47 crc kubenswrapper[4967]: E1121 15:35:47.461385 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 15:36:03.46136823 +0000 UTC m=+51.719889238 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:47 crc kubenswrapper[4967]: E1121 15:35:47.461404 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 15:36:03.461395721 +0000 UTC m=+51.719916729 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.482382 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.482414 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.482423 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.482437 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.482446 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:47Z","lastTransitionTime":"2025-11-21T15:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.536060 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.536129 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:47 crc kubenswrapper[4967]: E1121 15:35:47.536194 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:35:47 crc kubenswrapper[4967]: E1121 15:35:47.536325 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.584631 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.584670 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.584679 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.584695 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.584706 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:47Z","lastTransitionTime":"2025-11-21T15:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.687049 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.687097 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.687110 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.687132 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.687148 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:47Z","lastTransitionTime":"2025-11-21T15:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.792146 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.792222 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.792239 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.792271 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.792290 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:47Z","lastTransitionTime":"2025-11-21T15:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.813637 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" event={"ID":"301ed826-105f-43b3-b553-38186c8cc1be","Type":"ContainerStarted","Data":"bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e"} Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.813735 4967 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.831348 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:47Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.845496 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:47Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.858529 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:47Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.872953 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:47Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.886440 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:47Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.894823 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.895100 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.895245 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.895384 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.895488 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:47Z","lastTransitionTime":"2025-11-21T15:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.900708 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:47Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.915299 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reaso
n\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T15:35:47Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.929786 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:47Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.949447 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://766490517b82844a99521e42980fcd6b0e94eb2aee0dedb6f933747591347fdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:47Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.962988 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:47Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.978055 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:47Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.995567 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:47Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.998581 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.998634 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.998645 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.998663 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:47 crc kubenswrapper[4967]: I1121 15:35:47.998675 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:47Z","lastTransitionTime":"2025-11-21T15:35:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.010090 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:48Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.020590 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:48Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.076893 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.076936 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" 
Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.076950 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.076982 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.076994 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:48Z","lastTransitionTime":"2025-11-21T15:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:48 crc kubenswrapper[4967]: E1121 15:35:48.089556 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:48Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.092839 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.092872 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.092881 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.092899 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.092908 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:48Z","lastTransitionTime":"2025-11-21T15:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:48 crc kubenswrapper[4967]: E1121 15:35:48.108842 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:48Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.113176 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.113214 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.113226 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.113241 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.113251 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:48Z","lastTransitionTime":"2025-11-21T15:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:48 crc kubenswrapper[4967]: E1121 15:35:48.126530 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:48Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.130378 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.130404 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.130413 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.130428 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.130441 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:48Z","lastTransitionTime":"2025-11-21T15:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:48 crc kubenswrapper[4967]: E1121 15:35:48.141723 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:48Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.147277 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.147383 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.147406 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.147436 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.147465 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:48Z","lastTransitionTime":"2025-11-21T15:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:48 crc kubenswrapper[4967]: E1121 15:35:48.162518 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:48Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:48 crc kubenswrapper[4967]: E1121 15:35:48.162706 4967 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.164558 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.164592 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.164602 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.164618 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.164630 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:48Z","lastTransitionTime":"2025-11-21T15:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.266904 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.266941 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.266950 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.266964 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.266974 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:48Z","lastTransitionTime":"2025-11-21T15:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.370016 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.370066 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.370076 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.370094 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.370106 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:48Z","lastTransitionTime":"2025-11-21T15:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.473143 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.473207 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.473219 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.473248 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.473262 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:48Z","lastTransitionTime":"2025-11-21T15:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.538536 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:35:48 crc kubenswrapper[4967]: E1121 15:35:48.538681 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.577330 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.577395 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.577408 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.577436 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.577450 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:48Z","lastTransitionTime":"2025-11-21T15:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.680669 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.680754 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.680769 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.680801 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.680819 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:48Z","lastTransitionTime":"2025-11-21T15:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.783971 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.784039 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.784049 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.784068 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.784079 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:48Z","lastTransitionTime":"2025-11-21T15:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.887364 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.887405 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.887432 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.887470 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.887485 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:48Z","lastTransitionTime":"2025-11-21T15:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.991168 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.991229 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.991239 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.991262 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:48 crc kubenswrapper[4967]: I1121 15:35:48.991275 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:48Z","lastTransitionTime":"2025-11-21T15:35:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.094029 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.094095 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.094110 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.094137 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.094158 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:49Z","lastTransitionTime":"2025-11-21T15:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.197084 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.197148 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.197166 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.197183 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.197193 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:49Z","lastTransitionTime":"2025-11-21T15:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.300015 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.300064 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.300073 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.300094 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.300104 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:49Z","lastTransitionTime":"2025-11-21T15:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.403036 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.403089 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.403101 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.403124 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.403137 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:49Z","lastTransitionTime":"2025-11-21T15:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.505395 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.505429 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.505437 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.505455 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.505464 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:49Z","lastTransitionTime":"2025-11-21T15:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.535823 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.535870 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:49 crc kubenswrapper[4967]: E1121 15:35:49.535952 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:35:49 crc kubenswrapper[4967]: E1121 15:35:49.536035 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.608129 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.608173 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.608184 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.608202 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.608215 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:49Z","lastTransitionTime":"2025-11-21T15:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.710306 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.710359 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.710368 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.710381 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.710391 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:49Z","lastTransitionTime":"2025-11-21T15:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.813077 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.813119 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.813127 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.813142 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.813150 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:49Z","lastTransitionTime":"2025-11-21T15:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.914910 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.914971 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.914982 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.915015 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.915026 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:49Z","lastTransitionTime":"2025-11-21T15:35:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.989438 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v"] Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.989833 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.991975 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 21 15:35:49 crc kubenswrapper[4967]: I1121 15:35:49.992226 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.000700 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:49Z is after 
2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.017522 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.017616 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.017672 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.017682 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.017696 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.017705 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:50Z","lastTransitionTime":"2025-11-21T15:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.031280 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.042729 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.054966 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.073628 4967 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.088601 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/69ff522a-c497-426d-9af8-5afbdb04dc0b-env-overrides\") 
pod \"ovnkube-control-plane-749d76644c-79w5v\" (UID: \"69ff522a-c497-426d-9af8-5afbdb04dc0b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.088769 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/69ff522a-c497-426d-9af8-5afbdb04dc0b-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-79w5v\" (UID: \"69ff522a-c497-426d-9af8-5afbdb04dc0b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.088814 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/69ff522a-c497-426d-9af8-5afbdb04dc0b-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-79w5v\" (UID: \"69ff522a-c497-426d-9af8-5afbdb04dc0b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.088870 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzdls\" (UniqueName: \"kubernetes.io/projected/69ff522a-c497-426d-9af8-5afbdb04dc0b-kube-api-access-lzdls\") pod \"ovnkube-control-plane-749d76644c-79w5v\" (UID: \"69ff522a-c497-426d-9af8-5afbdb04dc0b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.089458 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.106763 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.120193 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.120239 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.120254 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.120274 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.120287 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:50Z","lastTransitionTime":"2025-11-21T15:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.121752 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.140640 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://766490517b82844a99521e42980fcd6b0e94eb2aee0dedb6f933747591347fdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.154478 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.169270 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.182450 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.189413 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/69ff522a-c497-426d-9af8-5afbdb04dc0b-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-79w5v\" (UID: \"69ff522a-c497-426d-9af8-5afbdb04dc0b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.189446 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/69ff522a-c497-426d-9af8-5afbdb04dc0b-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-79w5v\" (UID: \"69ff522a-c497-426d-9af8-5afbdb04dc0b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.189480 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzdls\" (UniqueName: \"kubernetes.io/projected/69ff522a-c497-426d-9af8-5afbdb04dc0b-kube-api-access-lzdls\") pod \"ovnkube-control-plane-749d76644c-79w5v\" (UID: \"69ff522a-c497-426d-9af8-5afbdb04dc0b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.189510 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/69ff522a-c497-426d-9af8-5afbdb04dc0b-env-overrides\") pod \"ovnkube-control-plane-749d76644c-79w5v\" (UID: \"69ff522a-c497-426d-9af8-5afbdb04dc0b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.190097 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/69ff522a-c497-426d-9af8-5afbdb04dc0b-env-overrides\") pod \"ovnkube-control-plane-749d76644c-79w5v\" (UID: \"69ff522a-c497-426d-9af8-5afbdb04dc0b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.190570 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/69ff522a-c497-426d-9af8-5afbdb04dc0b-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-79w5v\" (UID: 
\"69ff522a-c497-426d-9af8-5afbdb04dc0b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.195232 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192
.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.199850 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/69ff522a-c497-426d-9af8-5afbdb04dc0b-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-79w5v\" (UID: \"69ff522a-c497-426d-9af8-5afbdb04dc0b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.206074 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzdls\" (UniqueName: \"kubernetes.io/projected/69ff522a-c497-426d-9af8-5afbdb04dc0b-kube-api-access-lzdls\") pod \"ovnkube-control-plane-749d76644c-79w5v\" (UID: \"69ff522a-c497-426d-9af8-5afbdb04dc0b\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.207644 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.222624 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.222674 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.222686 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.222702 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.222715 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:50Z","lastTransitionTime":"2025-11-21T15:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.307153 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" Nov 21 15:35:50 crc kubenswrapper[4967]: W1121 15:35:50.324200 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod69ff522a_c497_426d_9af8_5afbdb04dc0b.slice/crio-7ddac22bf4099df3684174cf0c088b5f05bcfdc9535bb55a6ee9c80e32e516a7 WatchSource:0}: Error finding container 7ddac22bf4099df3684174cf0c088b5f05bcfdc9535bb55a6ee9c80e32e516a7: Status 404 returned error can't find the container with id 7ddac22bf4099df3684174cf0c088b5f05bcfdc9535bb55a6ee9c80e32e516a7 Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.325414 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.325454 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.325464 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.325479 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.325490 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:50Z","lastTransitionTime":"2025-11-21T15:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.428277 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.428354 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.428367 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.428384 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.428395 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:50Z","lastTransitionTime":"2025-11-21T15:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.530906 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.531450 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.531464 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.531481 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.531491 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:50Z","lastTransitionTime":"2025-11-21T15:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.535704 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:35:50 crc kubenswrapper[4967]: E1121 15:35:50.535860 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.634109 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.634162 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.634172 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.634192 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.634202 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:50Z","lastTransitionTime":"2025-11-21T15:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.714595 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-kj7qv"] Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.715094 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:35:50 crc kubenswrapper[4967]: E1121 15:35:50.715280 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.731175 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.744504 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.744570 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.744583 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.744601 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.744613 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:50Z","lastTransitionTime":"2025-11-21T15:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.766291 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://766490517b82844a99521e42980fcd6b0e94eb2a
ee0dedb6f933747591347fdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.790688 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.795404 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnwnx\" (UniqueName: \"kubernetes.io/projected/e413228d-eaa3-45fb-8adf-35e0054bf53c-kube-api-access-tnwnx\") pod \"network-metrics-daemon-kj7qv\" (UID: \"e413228d-eaa3-45fb-8adf-35e0054bf53c\") " pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.795770 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs\") pod \"network-metrics-daemon-kj7qv\" (UID: \"e413228d-eaa3-45fb-8adf-35e0054bf53c\") " pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.809838 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.824245 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovnkube-controller/0.log" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.827511 4967 generic.go:334] "Generic (PLEG): container finished" podID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerID="766490517b82844a99521e42980fcd6b0e94eb2aee0dedb6f933747591347fdb" exitCode=1 Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.827562 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerDied","Data":"766490517b82844a99521e42980fcd6b0e94eb2aee0dedb6f933747591347fdb"} Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.828532 4967 scope.go:117] "RemoveContainer" containerID="766490517b82844a99521e42980fcd6b0e94eb2aee0dedb6f933747591347fdb" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.829892 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" event={"ID":"69ff522a-c497-426d-9af8-5afbdb04dc0b","Type":"ContainerStarted","Data":"ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7"} Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.829937 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" event={"ID":"69ff522a-c497-426d-9af8-5afbdb04dc0b","Type":"ContainerStarted","Data":"35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef"} Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.829949 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" event={"ID":"69ff522a-c497-426d-9af8-5afbdb04dc0b","Type":"ContainerStarted","Data":"7ddac22bf4099df3684174cf0c088b5f05bcfdc9535bb55a6ee9c80e32e516a7"} Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.832425 4967 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.847747 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.847786 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.847795 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.847808 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.847816 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:50Z","lastTransitionTime":"2025-11-21T15:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.851491 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\
\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.864767 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.878479 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.890133 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.896737 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnwnx\" (UniqueName: \"kubernetes.io/projected/e413228d-eaa3-45fb-8adf-35e0054bf53c-kube-api-access-tnwnx\") pod \"network-metrics-daemon-kj7qv\" (UID: \"e413228d-eaa3-45fb-8adf-35e0054bf53c\") " pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.896773 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs\") pod \"network-metrics-daemon-kj7qv\" (UID: \"e413228d-eaa3-45fb-8adf-35e0054bf53c\") " pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:35:50 crc kubenswrapper[4967]: E1121 15:35:50.896887 4967 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 15:35:50 crc kubenswrapper[4967]: E1121 15:35:50.896927 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs podName:e413228d-eaa3-45fb-8adf-35e0054bf53c nodeName:}" failed. 
No retries permitted until 2025-11-21 15:35:51.396915145 +0000 UTC m=+39.655436153 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs") pod "network-metrics-daemon-kj7qv" (UID: "e413228d-eaa3-45fb-8adf-35e0054bf53c") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.909934 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift
-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.917430 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnwnx\" (UniqueName: \"kubernetes.io/projected/e413228d-eaa3-45fb-8adf-35e0054bf53c-kube-api-access-tnwnx\") pod \"network-metrics-daemon-kj7qv\" (UID: \"e413228d-eaa3-45fb-8adf-35e0054bf53c\") " pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.924771 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.937783 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.950265 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.951894 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.952029 4967 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.952050 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.952070 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.952085 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:50Z","lastTransitionTime":"2025-11-21T15:35:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.961908 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc 
kubenswrapper[4967]: I1121 15:35:50.973035 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:50 crc kubenswrapper[4967]: I1121 15:35:50.987602 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5169
27adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\
\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.000708 4967 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:50Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.015232 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.028244 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.041870 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.053993 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.054021 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.054030 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.054044 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.054056 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:51Z","lastTransitionTime":"2025-11-21T15:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.059201 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.073796 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.089808 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.108247 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.121700 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.134911 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.146732 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.156426 4967 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.156459 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.156469 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.156486 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.156497 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:51Z","lastTransitionTime":"2025-11-21T15:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.160048 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.170784 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.183292 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.203798 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://766490517b82844a99521e42980fcd6b0e94eb2aee0dedb6f933747591347fdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://766490517b82844a99521e42980fcd6b0e94eb2aee0dedb6f933747591347fdb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"tor.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:35:49.488511 6302 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:35:49.488976 6302 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 15:35:49.489020 6302 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 15:35:49.489097 6302 handler.go:208] Removed *v1.Node event handler 7\\\\nI1121 15:35:49.489177 6302 handler.go:208] Removed *v1.Node event handler 2\\\\nI1121 15:35:49.489290 6302 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1121 15:35:49.489396 6302 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1121 15:35:49.489454 6302 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1121 15:35:49.489486 6302 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1121 15:35:49.489455 6302 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1121 15:35:49.489515 6302 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1121 15:35:49.489530 6302 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1121 15:35:49.489576 6302 factory.go:656] Stopping watch factory\\\\nI1121 15:35:49.489613 6302 ovnkube.go:599] Stopped ovnkube\\\\nI1121 
15\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.217449 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.259295 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.259374 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.259385 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.259400 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.259415 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:51Z","lastTransitionTime":"2025-11-21T15:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.363359 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.363695 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.363758 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.363987 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.364079 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:51Z","lastTransitionTime":"2025-11-21T15:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.401857 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs\") pod \"network-metrics-daemon-kj7qv\" (UID: \"e413228d-eaa3-45fb-8adf-35e0054bf53c\") " pod="openshift-multus/network-metrics-daemon-kj7qv"
Nov 21 15:35:51 crc kubenswrapper[4967]: E1121 15:35:51.402021 4967 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 21 15:35:51 crc kubenswrapper[4967]: E1121 15:35:51.402078 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs podName:e413228d-eaa3-45fb-8adf-35e0054bf53c nodeName:}" failed. No retries permitted until 2025-11-21 15:35:52.402060221 +0000 UTC m=+40.660581219 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs") pod "network-metrics-daemon-kj7qv" (UID: "e413228d-eaa3-45fb-8adf-35e0054bf53c") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.466407 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.466799 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.466901 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.466995 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.467090 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:51Z","lastTransitionTime":"2025-11-21T15:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.535604 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 21 15:35:51 crc kubenswrapper[4967]: E1121 15:35:51.535759 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.535598 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 21 15:35:51 crc kubenswrapper[4967]: E1121 15:35:51.536163 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.569921 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.569963 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.569973 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.569989 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.569999 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:51Z","lastTransitionTime":"2025-11-21T15:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.671962 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.672001 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.672013 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.672029 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.672040 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:51Z","lastTransitionTime":"2025-11-21T15:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.774375 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.774411 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.774420 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.774440 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.774449 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:51Z","lastTransitionTime":"2025-11-21T15:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.835865 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovnkube-controller/0.log"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.838467 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerStarted","Data":"7b6ade107627a4d50ba3934f012d8fddb94dc7d8c989985f5938dd2d1977fa8e"}
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.838600 4967 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.855824 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.878033 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.882446 4967 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.882507 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.882522 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.882548 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.882561 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:51Z","lastTransitionTime":"2025-11-21T15:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.902504 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.954252 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.973738 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.984645 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.984680 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.984709 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.984725 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.984734 4967 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:51Z","lastTransitionTime":"2025-11-21T15:35:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:51 crc kubenswrapper[4967]: I1121 15:35:51.991275 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:51Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.004088 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.017288 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.028540 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.042526 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.062824 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b6ade107627a4d50ba3934f012d8fddb94dc7d8c989985f5938dd2d1977fa8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://766490517b82844a99521e42980fcd6b0e94eb2aee0dedb6f933747591347fdb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"tor.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:35:49.488511 6302 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:35:49.488976 6302 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 15:35:49.489020 6302 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 15:35:49.489097 6302 handler.go:208] Removed *v1.Node event handler 7\\\\nI1121 15:35:49.489177 6302 handler.go:208] Removed *v1.Node event handler 2\\\\nI1121 15:35:49.489290 6302 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1121 15:35:49.489396 6302 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1121 15:35:49.489454 6302 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1121 15:35:49.489486 6302 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1121 15:35:49.489455 6302 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1121 15:35:49.489515 6302 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1121 15:35:49.489530 6302 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1121 15:35:49.489576 6302 factory.go:656] Stopping watch factory\\\\nI1121 15:35:49.489613 6302 ovnkube.go:599] Stopped ovnkube\\\\nI1121 
15\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.077654 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.087207 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.087247 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.087260 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.087276 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.087286 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:52Z","lastTransitionTime":"2025-11-21T15:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.090744 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.104367 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.127372 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.144917 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.189698 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.189736 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.189745 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.189760 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.189770 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:52Z","lastTransitionTime":"2025-11-21T15:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.292216 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.292257 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.292266 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.292281 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.292294 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:52Z","lastTransitionTime":"2025-11-21T15:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.395896 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.396293 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.396305 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.396346 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.396356 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:52Z","lastTransitionTime":"2025-11-21T15:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.411489 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs\") pod \"network-metrics-daemon-kj7qv\" (UID: \"e413228d-eaa3-45fb-8adf-35e0054bf53c\") " pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:35:52 crc kubenswrapper[4967]: E1121 15:35:52.411635 4967 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 15:35:52 crc kubenswrapper[4967]: E1121 15:35:52.411699 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs podName:e413228d-eaa3-45fb-8adf-35e0054bf53c nodeName:}" failed. No retries permitted until 2025-11-21 15:35:54.411685513 +0000 UTC m=+42.670206521 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs") pod "network-metrics-daemon-kj7qv" (UID: "e413228d-eaa3-45fb-8adf-35e0054bf53c") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.497996 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.498044 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.498059 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.498074 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.498085 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:52Z","lastTransitionTime":"2025-11-21T15:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.535805 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.535868 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:35:52 crc kubenswrapper[4967]: E1121 15:35:52.535960 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:35:52 crc kubenswrapper[4967]: E1121 15:35:52.536299 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.549635 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.564640 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.581435 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.597089 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.600802 4967 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.600844 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.600855 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.600872 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.600884 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:52Z","lastTransitionTime":"2025-11-21T15:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.615186 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.634637 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.652184 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.669181 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.687703 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.703974 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{
\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.704226 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.704276 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.704288 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.704305 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.704334 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:52Z","lastTransitionTime":"2025-11-21T15:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.726738 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b6ade107627a4d50ba3934f012d8fddb94dc7d8c989985f5938dd2d1977fa8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://766490517b82844a99521e42980fcd6b0e94eb2aee0dedb6f933747591347fdb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"tor.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:35:49.488511 6302 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:35:49.488976 6302 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 15:35:49.489020 6302 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 15:35:49.489097 6302 handler.go:208] Removed *v1.Node event handler 7\\\\nI1121 15:35:49.489177 6302 handler.go:208] Removed *v1.Node event handler 2\\\\nI1121 15:35:49.489290 6302 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1121 15:35:49.489396 6302 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1121 15:35:49.489454 6302 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1121 15:35:49.489486 6302 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1121 15:35:49.489455 6302 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1121 15:35:49.489515 6302 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1121 15:35:49.489530 6302 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1121 15:35:49.489576 6302 factory.go:656] Stopping watch factory\\\\nI1121 15:35:49.489613 6302 ovnkube.go:599] Stopped ovnkube\\\\nI1121 
15\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.744911 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.762547 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.780803 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.796299 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.807798 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.807859 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.807881 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.807909 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.807929 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:52Z","lastTransitionTime":"2025-11-21T15:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.813809 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.843841 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovnkube-controller/1.log" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.844610 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovnkube-controller/0.log" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.848693 4967 generic.go:334] "Generic (PLEG): container finished" podID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerID="7b6ade107627a4d50ba3934f012d8fddb94dc7d8c989985f5938dd2d1977fa8e" exitCode=1 Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.848760 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerDied","Data":"7b6ade107627a4d50ba3934f012d8fddb94dc7d8c989985f5938dd2d1977fa8e"} Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.848860 4967 scope.go:117] "RemoveContainer" containerID="766490517b82844a99521e42980fcd6b0e94eb2aee0dedb6f933747591347fdb" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.849571 4967 scope.go:117] "RemoveContainer" containerID="7b6ade107627a4d50ba3934f012d8fddb94dc7d8c989985f5938dd2d1977fa8e" Nov 21 15:35:52 crc kubenswrapper[4967]: E1121 15:35:52.849830 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-zm492_openshift-ovn-kubernetes(eeb9277d-9a26-4665-a01c-9ed1c379e8dd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.865512 4967 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.887762 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b6ade107627a4d50ba3934f012d8fddb94dc7d8c989985f5938dd2d1977fa8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://766490517b82844a99521e42980fcd6b0e94eb2aee0dedb6f933747591347fdb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"tor.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:35:49.488511 6302 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:35:49.488976 6302 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 15:35:49.489020 6302 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 15:35:49.489097 6302 handler.go:208] Removed *v1.Node event handler 7\\\\nI1121 15:35:49.489177 6302 handler.go:208] Removed *v1.Node event handler 2\\\\nI1121 15:35:49.489290 6302 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1121 15:35:49.489396 6302 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1121 15:35:49.489454 6302 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1121 15:35:49.489486 6302 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1121 15:35:49.489455 6302 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1121 15:35:49.489515 6302 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1121 15:35:49.489530 6302 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1121 15:35:49.489576 6302 factory.go:656] Stopping watch factory\\\\nI1121 15:35:49.489613 6302 ovnkube.go:599] Stopped ovnkube\\\\nI1121 15\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b6ade107627a4d50ba3934f012d8fddb94dc7d8c989985f5938dd2d1977fa8e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:35:52Z\\\",\\\"message\\\":\\\"e_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"e4e4203e-87c7-4024-930a-5d6bdfe2bdde\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", 
ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.254\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1121 15:35:52.340996 6528 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\
\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.905908 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 
15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.911249 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.911300 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.911329 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.911350 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.911362 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:52Z","lastTransitionTime":"2025-11-21T15:35:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.928905 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"rea
dy\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.947250 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.965877 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.982576 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:52 crc kubenswrapper[4967]: I1121 15:35:52.996467 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true
,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.012781 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"
startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageI
D\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"
192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:53Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.013986 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.014029 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.014039 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.014057 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.014067 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:53Z","lastTransitionTime":"2025-11-21T15:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.028545 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:53Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.046520 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:53Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.061918 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:53Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.077293 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:53Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.090500 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:53Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.102335 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:53Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.113790 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:53Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.116659 4967 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.116713 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.116726 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.116745 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.116757 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:53Z","lastTransitionTime":"2025-11-21T15:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.219535 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.219579 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.219592 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.219616 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.219664 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:53Z","lastTransitionTime":"2025-11-21T15:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.323117 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.323468 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.323487 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.323514 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.323529 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:53Z","lastTransitionTime":"2025-11-21T15:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.426101 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.426449 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.426543 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.426631 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.426714 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:53Z","lastTransitionTime":"2025-11-21T15:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.529955 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.529993 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.530004 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.530019 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.530028 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:53Z","lastTransitionTime":"2025-11-21T15:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.536144 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.536201 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:35:53 crc kubenswrapper[4967]: E1121 15:35:53.536265 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:35:53 crc kubenswrapper[4967]: E1121 15:35:53.536471 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.632891 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.633195 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.633277 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.633389 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.633475 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:53Z","lastTransitionTime":"2025-11-21T15:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.736981 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.737030 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.737041 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.737059 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.737070 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:53Z","lastTransitionTime":"2025-11-21T15:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.839040 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.839299 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.839399 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.839469 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.839566 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:53Z","lastTransitionTime":"2025-11-21T15:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.852540 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovnkube-controller/1.log" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.942950 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.942996 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.943012 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.943034 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:53 crc kubenswrapper[4967]: I1121 15:35:53.943048 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:53Z","lastTransitionTime":"2025-11-21T15:35:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.048464 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.048516 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.048527 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.048564 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.048619 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:54Z","lastTransitionTime":"2025-11-21T15:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.150951 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.151022 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.151043 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.151070 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.151090 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:54Z","lastTransitionTime":"2025-11-21T15:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.254388 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.254461 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.254473 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.254493 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.254510 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:54Z","lastTransitionTime":"2025-11-21T15:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.256695 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.274221 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt
\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:54Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.292124 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:54Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.309750 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:54Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.325087 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:54Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.336738 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:54Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.350448 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:54Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.356280 4967 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.356344 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.356355 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.356376 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.356388 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:54Z","lastTransitionTime":"2025-11-21T15:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.368049 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:54Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.384166 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:54Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.398140 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:54Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.416605 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b6ade107627a4d50ba3934f012d8fddb94dc7d8c989985f5938dd2d1977fa8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://766490517b82844a99521e42980fcd6b0e94eb2aee0dedb6f933747591347fdb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"tor.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:35:49.488511 6302 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:35:49.488976 6302 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 15:35:49.489020 6302 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 15:35:49.489097 6302 handler.go:208] Removed *v1.Node event handler 7\\\\nI1121 15:35:49.489177 6302 handler.go:208] Removed *v1.Node event handler 2\\\\nI1121 15:35:49.489290 6302 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1121 15:35:49.489396 6302 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1121 15:35:49.489454 6302 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1121 15:35:49.489486 6302 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1121 15:35:49.489455 6302 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1121 15:35:49.489515 6302 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1121 15:35:49.489530 6302 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1121 15:35:49.489576 6302 factory.go:656] Stopping watch factory\\\\nI1121 15:35:49.489613 6302 ovnkube.go:599] Stopped ovnkube\\\\nI1121 15\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b6ade107627a4d50ba3934f012d8fddb94dc7d8c989985f5938dd2d1977fa8e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:35:52Z\\\",\\\"message\\\":\\\"e_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"e4e4203e-87c7-4024-930a-5d6bdfe2bdde\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", 
ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.254\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1121 15:35:52.340996 6528 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\
\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:54Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.431122 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:54Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.438737 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs\") pod \"network-metrics-daemon-kj7qv\" (UID: \"e413228d-eaa3-45fb-8adf-35e0054bf53c\") " pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:35:54 crc kubenswrapper[4967]: E1121 15:35:54.438967 4967 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 15:35:54 crc kubenswrapper[4967]: E1121 15:35:54.439210 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs podName:e413228d-eaa3-45fb-8adf-35e0054bf53c nodeName:}" failed. No retries permitted until 2025-11-21 15:35:58.439191864 +0000 UTC m=+46.697712872 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs") pod "network-metrics-daemon-kj7qv" (UID: "e413228d-eaa3-45fb-8adf-35e0054bf53c") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.446290 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:54Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.460449 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.460525 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.460538 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.460558 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.460570 4967 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:54Z","lastTransitionTime":"2025-11-21T15:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.464106 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:54Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.479065 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:54Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.491730 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:54Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.503869 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:54Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.536258 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.536414 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:35:54 crc kubenswrapper[4967]: E1121 15:35:54.536455 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:35:54 crc kubenswrapper[4967]: E1121 15:35:54.536643 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.563621 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.563656 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.563667 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.563679 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.563688 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:54Z","lastTransitionTime":"2025-11-21T15:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.666561 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.666600 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.666613 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.666635 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.666649 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:54Z","lastTransitionTime":"2025-11-21T15:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.769596 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.769659 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.769668 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.769684 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.769693 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:54Z","lastTransitionTime":"2025-11-21T15:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.871364 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.871397 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.871407 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.871420 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.871430 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:54Z","lastTransitionTime":"2025-11-21T15:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.973890 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.973920 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.973929 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.973943 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:54 crc kubenswrapper[4967]: I1121 15:35:54.973953 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:54Z","lastTransitionTime":"2025-11-21T15:35:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.076671 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.076733 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.076744 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.076770 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.076782 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:55Z","lastTransitionTime":"2025-11-21T15:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.179160 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.179330 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.179344 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.179361 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.179370 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:55Z","lastTransitionTime":"2025-11-21T15:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.282228 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.282345 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.282357 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.282379 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.282395 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:55Z","lastTransitionTime":"2025-11-21T15:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.386474 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.386568 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.386586 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.386643 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.386661 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:55Z","lastTransitionTime":"2025-11-21T15:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.490105 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.490475 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.490586 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.490677 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.490754 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:55Z","lastTransitionTime":"2025-11-21T15:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.535494 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.535534 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:55 crc kubenswrapper[4967]: E1121 15:35:55.535619 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:35:55 crc kubenswrapper[4967]: E1121 15:35:55.535681 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.593265 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.593428 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.593450 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.593470 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.593480 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:55Z","lastTransitionTime":"2025-11-21T15:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.696851 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.696927 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.696937 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.696950 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.696960 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:55Z","lastTransitionTime":"2025-11-21T15:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.798948 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.798990 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.799059 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.799078 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.799095 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:55Z","lastTransitionTime":"2025-11-21T15:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.901555 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.901600 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.901612 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.901638 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:55 crc kubenswrapper[4967]: I1121 15:35:55.901651 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:55Z","lastTransitionTime":"2025-11-21T15:35:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.003973 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.004279 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.004498 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.004651 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.004715 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:56Z","lastTransitionTime":"2025-11-21T15:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.107585 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.107619 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.107630 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.107667 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.107683 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:56Z","lastTransitionTime":"2025-11-21T15:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.212029 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.212067 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.212103 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.212119 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.212128 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:56Z","lastTransitionTime":"2025-11-21T15:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.314383 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.314440 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.314451 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.314466 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.314495 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:56Z","lastTransitionTime":"2025-11-21T15:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.416945 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.416999 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.417014 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.417032 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.417044 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:56Z","lastTransitionTime":"2025-11-21T15:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.521904 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.521990 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.522005 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.522036 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.522049 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:56Z","lastTransitionTime":"2025-11-21T15:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.535476 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.535653 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:35:56 crc kubenswrapper[4967]: E1121 15:35:56.535956 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:35:56 crc kubenswrapper[4967]: E1121 15:35:56.536047 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.624889 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.624931 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.624943 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.624961 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.624971 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:56Z","lastTransitionTime":"2025-11-21T15:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.727635 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.727671 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.727683 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.727699 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.727710 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:56Z","lastTransitionTime":"2025-11-21T15:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.829994 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.830353 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.830461 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.830556 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.830659 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:56Z","lastTransitionTime":"2025-11-21T15:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.935911 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.936454 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.936560 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.936692 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:56 crc kubenswrapper[4967]: I1121 15:35:56.936785 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:56Z","lastTransitionTime":"2025-11-21T15:35:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.045515 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.045559 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.045569 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.045586 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.045597 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:57Z","lastTransitionTime":"2025-11-21T15:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.148899 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.148966 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.148993 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.149016 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.149026 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:57Z","lastTransitionTime":"2025-11-21T15:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.252460 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.252511 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.252519 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.252541 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.252555 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:57Z","lastTransitionTime":"2025-11-21T15:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.355982 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.356040 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.356088 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.356111 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.356130 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:57Z","lastTransitionTime":"2025-11-21T15:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.458504 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.458541 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.458551 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.458569 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.458581 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:57Z","lastTransitionTime":"2025-11-21T15:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.535763 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.535925 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:35:57 crc kubenswrapper[4967]: E1121 15:35:57.535998 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:35:57 crc kubenswrapper[4967]: E1121 15:35:57.536142 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.561638 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.561900 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.561959 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.562017 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.562070 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:57Z","lastTransitionTime":"2025-11-21T15:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.664665 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.664707 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.664715 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.664728 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.664738 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:57Z","lastTransitionTime":"2025-11-21T15:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.766966 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.767017 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.767026 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.767039 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.767049 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:57Z","lastTransitionTime":"2025-11-21T15:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.868751 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.869039 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.869126 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.869258 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.869362 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:57Z","lastTransitionTime":"2025-11-21T15:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.971659 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.971701 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.971711 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.971724 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:57 crc kubenswrapper[4967]: I1121 15:35:57.971733 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:57Z","lastTransitionTime":"2025-11-21T15:35:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.074016 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.074271 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.074376 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.074464 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.074579 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:58Z","lastTransitionTime":"2025-11-21T15:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.177504 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.177541 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.177550 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.177568 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.177585 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:58Z","lastTransitionTime":"2025-11-21T15:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.185552 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.185733 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.185797 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.185863 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.185927 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:58Z","lastTransitionTime":"2025-11-21T15:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:58 crc kubenswrapper[4967]: E1121 15:35:58.200752 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:58Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.204544 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.204583 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.204598 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.204616 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.204627 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:58Z","lastTransitionTime":"2025-11-21T15:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:58 crc kubenswrapper[4967]: E1121 15:35:58.217638 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:58Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.222016 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.222055 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.222065 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.222081 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.222091 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:58Z","lastTransitionTime":"2025-11-21T15:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:58 crc kubenswrapper[4967]: E1121 15:35:58.240344 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:58Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.244962 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.245067 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.245101 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.245126 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.245138 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:58Z","lastTransitionTime":"2025-11-21T15:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:58 crc kubenswrapper[4967]: E1121 15:35:58.259692 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:58Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.264163 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.264242 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.264257 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.264283 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.264303 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:58Z","lastTransitionTime":"2025-11-21T15:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:58 crc kubenswrapper[4967]: E1121 15:35:58.276456 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:58Z is after 2025-08-24T17:21:41Z" Nov 21 15:35:58 crc kubenswrapper[4967]: E1121 15:35:58.276634 4967 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.280462 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
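The aborted patches above all share one root cause: the API server cannot call the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 because the webhook's serving certificate expired on 2025-08-24T17:21:41Z while the node clock reads 2025-11-21. The string "x509: certificate has expired or is not yet valid" is Go's crypto/x509 validity-window error; a minimal sketch of the equivalent check follows (the PEM path is a hypothetical stand-in, not the webhook's real certificate location):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path; substitute the webhook's actual serving certificate.
	data, err := os.ReadFile("/tmp/webhook-serving-cert.pem")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	now := time.Now()
	// Same validity-window test that makes the TLS handshake fail with
	// "x509: certificate has expired or is not yet valid".
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		fmt.Printf("certificate invalid: current time %s is outside [%s, %s]\n",
			now.Format(time.RFC3339),
			cert.NotBefore.Format(time.RFC3339),
			cert.NotAfter.Format(time.RFC3339))
		os.Exit(1)
	}
	fmt.Println("certificate valid until", cert.NotAfter.Format(time.RFC3339))
}

After a fixed number of consecutive patch failures (five in current kubelets), the kubelet abandons the sync, which is the "update node status exceeds retry count" record above; it retries on the next node-status interval.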
event="NodeHasSufficientMemory" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.280510 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.280521 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.280540 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.280550 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:58Z","lastTransitionTime":"2025-11-21T15:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.383543 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.383605 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.383624 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.383648 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.383666 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:58Z","lastTransitionTime":"2025-11-21T15:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.483413 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs\") pod \"network-metrics-daemon-kj7qv\" (UID: \"e413228d-eaa3-45fb-8adf-35e0054bf53c\") " pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:35:58 crc kubenswrapper[4967]: E1121 15:35:58.483667 4967 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 15:35:58 crc kubenswrapper[4967]: E1121 15:35:58.483779 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs podName:e413228d-eaa3-45fb-8adf-35e0054bf53c nodeName:}" failed. No retries permitted until 2025-11-21 15:36:06.483755887 +0000 UTC m=+54.742276965 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs") pod "network-metrics-daemon-kj7qv" (UID: "e413228d-eaa3-45fb-8adf-35e0054bf53c") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.486387 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.486426 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.486435 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.486451 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.486461 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:58Z","lastTransitionTime":"2025-11-21T15:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.536173 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.536229 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv"
Nov 21 15:35:58 crc kubenswrapper[4967]: E1121 15:35:58.536372 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 21 15:35:58 crc kubenswrapper[4967]: E1121 15:35:58.536431 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c"
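The failed metrics-certs mount above is requeued under exponential backoff: each consecutive failure doubles the wait before the next attempt is permitted, which is how the operation reaches "durationBeforeRetry 8s" (0.5 s, 1 s, 2 s, 4 s, 8 s over five failures). A sketch of that bookkeeping follows; the 500 ms initial delay and roughly two-minute cap mirror the kubelet's volume-operation backoff and are taken here as assumptions, not values read from this log:

package main

import (
	"fmt"
	"time"
)

// Assumed constants, mirroring kubelet's volume-operation backoff.
const (
	initialDurationBeforeRetry = 500 * time.Millisecond
	maxDurationBeforeRetry     = 2*time.Minute + 2*time.Second
)

type backoff struct {
	lastErrorTime       time.Time
	durationBeforeRetry time.Duration
}

// update doubles the wait after every failure, up to the cap.
func (b *backoff) update(now time.Time) {
	if b.durationBeforeRetry == 0 {
		b.durationBeforeRetry = initialDurationBeforeRetry
	} else if b.durationBeforeRetry < maxDurationBeforeRetry {
		b.durationBeforeRetry *= 2
		if b.durationBeforeRetry > maxDurationBeforeRetry {
			b.durationBeforeRetry = maxDurationBeforeRetry
		}
	}
	b.lastErrorTime = now
}

// retryAllowed reports whether enough time has passed since the last error,
// the test behind "No retries permitted until <lastError+duration>".
func (b *backoff) retryAllowed(now time.Time) bool {
	return now.After(b.lastErrorTime.Add(b.durationBeforeRetry))
}

func main() {
	var b backoff
	start := time.Now()
	for i := 1; i <= 5; i++ { // five consecutive MountVolume failures
		b.update(start)
		fmt.Printf("failure %d: no retries permitted until %s (durationBeforeRetry %s)\n",
			i, start.Add(b.durationBeforeRetry).Format("15:04:05"), b.durationBeforeRetry)
	}
	fmt.Println("retry allowed right away?", b.retryAllowed(start))
}

After the fifth failure the printed durationBeforeRetry is 8s, matching the nestedpendingoperations record above.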
pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.588728 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.588795 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.588814 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.588842 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.588857 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:58Z","lastTransitionTime":"2025-11-21T15:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.775049 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.775094 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.775105 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.775123 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.775134 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:58Z","lastTransitionTime":"2025-11-21T15:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.876911 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.876948 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.876958 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.876973 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.876985 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:58Z","lastTransitionTime":"2025-11-21T15:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.979848 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.979897 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.979909 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.979928 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:58 crc kubenswrapper[4967]: I1121 15:35:58.979941 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:58Z","lastTransitionTime":"2025-11-21T15:35:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.083127 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.083183 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.083196 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.083216 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.083230 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:59Z","lastTransitionTime":"2025-11-21T15:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.185836 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.185868 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.185878 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.185892 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.185904 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:59Z","lastTransitionTime":"2025-11-21T15:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.288202 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.288248 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.288260 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.288275 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.288286 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:59Z","lastTransitionTime":"2025-11-21T15:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.391219 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.391276 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.391285 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.391299 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.391332 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:59Z","lastTransitionTime":"2025-11-21T15:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.493269 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.493329 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.493338 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.493353 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.493362 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:59Z","lastTransitionTime":"2025-11-21T15:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.535696 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 21 15:35:59 crc kubenswrapper[4967]: E1121 15:35:59.535855 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.535923 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 21 15:35:59 crc kubenswrapper[4967]: E1121 15:35:59.536193 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
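Every NodeNotReady and "Error syncing pod" record in this stretch reduces to the same condition: nothing has written a CNI network config into /etc/kubernetes/cni/net.d/, so the kubelet reports NetworkPluginNotReady and refuses to create pod sandboxes (the network pods themselves cannot start while the expired webhook certificate blocks the control plane, hence the deadlock). A simplified sketch of that directory check follows; the real kubelet resolves configs through libcni, so treat this only as an illustration of the visible condition:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	var confs []string
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // extensions libcni accepts
			confs = append(confs, e.Name())
		}
	}
	if len(confs) == 0 {
		fmt.Printf("no CNI configuration file in %s. Has your network provider started?\n", confDir)
		os.Exit(1)
	}
	fmt.Println("CNI configuration found:", confs)
}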
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.595274 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.595361 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.595376 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.595395 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.595407 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:59Z","lastTransitionTime":"2025-11-21T15:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.698519 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.698578 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.698593 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.698615 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.698630 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:59Z","lastTransitionTime":"2025-11-21T15:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.800808 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.800862 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.800873 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.800890 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.800901 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:59Z","lastTransitionTime":"2025-11-21T15:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.904203 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.904256 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.904271 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.904293 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.904588 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:35:59Z","lastTransitionTime":"2025-11-21T15:35:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.977044 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 15:35:59 crc kubenswrapper[4967]: I1121 15:35:59.987450 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.000003 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:35:59Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.007040 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.007100 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.007112 
4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.007135 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.007152 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:00Z","lastTransitionTime":"2025-11-21T15:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.014452 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:00Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.032456 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:00Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.046254 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:00Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.059600 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:00Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.072174 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:00Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.085486 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:00Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.097976 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:00Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.110349 4967 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.110383 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.110392 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.110406 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.110417 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:00Z","lastTransitionTime":"2025-11-21T15:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.114612 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:00Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.128613 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/
ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:00Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.160573 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b6ade107627a4d50ba3934f012d8fddb94dc7d8
c989985f5938dd2d1977fa8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://766490517b82844a99521e42980fcd6b0e94eb2aee0dedb6f933747591347fdb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"tor.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:35:49.488511 6302 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:35:49.488976 6302 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 15:35:49.489020 6302 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 15:35:49.489097 6302 handler.go:208] Removed *v1.Node event handler 7\\\\nI1121 15:35:49.489177 6302 handler.go:208] Removed *v1.Node event handler 2\\\\nI1121 15:35:49.489290 6302 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1121 15:35:49.489396 6302 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1121 15:35:49.489454 6302 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1121 15:35:49.489486 6302 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1121 15:35:49.489455 6302 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1121 15:35:49.489515 6302 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1121 15:35:49.489530 6302 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1121 15:35:49.489576 6302 factory.go:656] Stopping watch factory\\\\nI1121 15:35:49.489613 6302 ovnkube.go:599] Stopped ovnkube\\\\nI1121 15\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b6ade107627a4d50ba3934f012d8fddb94dc7d8c989985f5938dd2d1977fa8e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:35:52Z\\\",\\\"message\\\":\\\"e_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"e4e4203e-87c7-4024-930a-5d6bdfe2bdde\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, 
Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.254\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1121 15:35:52.340996 6528 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secret
s/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:00Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.174761 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:00Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.194379 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:00Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.211874 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:00Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.214286 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.214364 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.214382 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.214410 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.214432 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:00Z","lastTransitionTime":"2025-11-21T15:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.227876 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:00Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.240468 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadO
nly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:00Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.317349 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.317384 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.317393 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.317407 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.317416 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:00Z","lastTransitionTime":"2025-11-21T15:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.420931 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.421001 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.421015 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.421044 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.421060 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:00Z","lastTransitionTime":"2025-11-21T15:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.524595 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.524663 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.524673 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.524692 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.524710 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:00Z","lastTransitionTime":"2025-11-21T15:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.536217 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.536217 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:00 crc kubenswrapper[4967]: E1121 15:36:00.536622 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:00 crc kubenswrapper[4967]: E1121 15:36:00.537636 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.628926 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.629014 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.629032 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.629063 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.629086 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:00Z","lastTransitionTime":"2025-11-21T15:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.733250 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.733302 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.733347 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.733375 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.733392 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:00Z","lastTransitionTime":"2025-11-21T15:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.837070 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.837151 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.837173 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.837213 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.837253 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:00Z","lastTransitionTime":"2025-11-21T15:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.942035 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.942094 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.942105 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.942132 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:00 crc kubenswrapper[4967]: I1121 15:36:00.942146 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:00Z","lastTransitionTime":"2025-11-21T15:36:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.046013 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.046083 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.046108 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.046142 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.046169 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:01Z","lastTransitionTime":"2025-11-21T15:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.149342 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.149400 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.149417 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.149440 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.149453 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:01Z","lastTransitionTime":"2025-11-21T15:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.252651 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.252705 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.252717 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.252735 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.252744 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:01Z","lastTransitionTime":"2025-11-21T15:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.356116 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.356186 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.356203 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.356232 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.356253 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:01Z","lastTransitionTime":"2025-11-21T15:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.459980 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.460049 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.460063 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.460086 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.460101 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:01Z","lastTransitionTime":"2025-11-21T15:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.535394 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:01 crc kubenswrapper[4967]: E1121 15:36:01.535625 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.535454 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:01 crc kubenswrapper[4967]: E1121 15:36:01.536244 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.563573 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.563722 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.563765 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.563793 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.563813 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:01Z","lastTransitionTime":"2025-11-21T15:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.667467 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.667528 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.667539 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.667564 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.667580 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:01Z","lastTransitionTime":"2025-11-21T15:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.770838 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.771450 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.771638 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.771810 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.771958 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:01Z","lastTransitionTime":"2025-11-21T15:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.875600 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.875658 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.875672 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.875694 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.875713 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:01Z","lastTransitionTime":"2025-11-21T15:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.979281 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.979351 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.979363 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.979382 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:01 crc kubenswrapper[4967]: I1121 15:36:01.979394 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:01Z","lastTransitionTime":"2025-11-21T15:36:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.083090 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.083164 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.083180 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.083212 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.083233 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:02Z","lastTransitionTime":"2025-11-21T15:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.185671 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.185724 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.185735 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.185752 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.185763 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:02Z","lastTransitionTime":"2025-11-21T15:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.288156 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.288204 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.288214 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.288229 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.288238 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:02Z","lastTransitionTime":"2025-11-21T15:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.391279 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.391368 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.391380 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.391404 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.391421 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:02Z","lastTransitionTime":"2025-11-21T15:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.494527 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.494577 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.494590 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.494610 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.494625 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:02Z","lastTransitionTime":"2025-11-21T15:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.535398 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.535447 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:02 crc kubenswrapper[4967]: E1121 15:36:02.535631 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:02 crc kubenswrapper[4967]: E1121 15:36:02.535865 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.552656 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:02Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.574694 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0
,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:02Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.589215 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:02Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.598206 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.598299 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.598350 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.598378 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.598391 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:02Z","lastTransitionTime":"2025-11-21T15:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.606441 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:02Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.622970 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:02Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.637302 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:02Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.653137 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:02Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.672474 4967 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:02Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.685461 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:02Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.701723 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.701809 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.701823 4967 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.701867 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.701881 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:02Z","lastTransitionTime":"2025-11-21T15:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.706283 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b6ade107627a4d50ba3934f012d8fddb94dc7d8
c989985f5938dd2d1977fa8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://766490517b82844a99521e42980fcd6b0e94eb2aee0dedb6f933747591347fdb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"tor.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:35:49.488511 6302 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:35:49.488976 6302 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 15:35:49.489020 6302 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 15:35:49.489097 6302 handler.go:208] Removed *v1.Node event handler 7\\\\nI1121 15:35:49.489177 6302 handler.go:208] Removed *v1.Node event handler 2\\\\nI1121 15:35:49.489290 6302 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1121 15:35:49.489396 6302 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1121 15:35:49.489454 6302 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1121 15:35:49.489486 6302 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1121 15:35:49.489455 6302 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1121 15:35:49.489515 6302 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1121 15:35:49.489530 6302 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1121 15:35:49.489576 6302 factory.go:656] Stopping watch factory\\\\nI1121 15:35:49.489613 6302 ovnkube.go:599] Stopped ovnkube\\\\nI1121 15\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b6ade107627a4d50ba3934f012d8fddb94dc7d8c989985f5938dd2d1977fa8e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:35:52Z\\\",\\\"message\\\":\\\"e_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"e4e4203e-87c7-4024-930a-5d6bdfe2bdde\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, 
Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.254\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1121 15:35:52.340996 6528 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secret
s/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:02Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.720795 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:02Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.735918 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:02Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.750337 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:02Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.766754 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:02Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.781491 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:02Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.796172 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68a41149-faa4-4822-b4d2-09d2461d2078\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa5cf8d5f0aa74c72abd3fe2c01372b3089066842b87cf74df4f9accde84fcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1812d7c4dec38deedb6295479db0a8c84ddd96a8e41d191b381582c01318c3ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67676d67141387f36bdc8d39929bc2992c37ec12d9b3c1553f8a043e30a39d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:02Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.804613 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.804661 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.804670 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.804685 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.804695 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:02Z","lastTransitionTime":"2025-11-21T15:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
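
The x509 error text quoted throughout ("current time 2025-11-21T15:36:02Z is after 2025-08-24T17:21:41Z") is the standard certificate validity-window check. A standalone sketch of that check against a PEM certificate on disk follows; the file path is hypothetical.

// Sketch only: compare the host clock against NotBefore/NotAfter the same
// way crypto/x509 does during verification.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
	"time"
)

func main() {
	pemBytes, err := os.ReadFile("/tmp/webhook-serving.crt") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil || block.Type != "CERTIFICATE" {
		log.Fatal("no CERTIFICATE block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	now := time.Now()
	switch {
	case now.Before(cert.NotBefore):
		fmt.Printf("certificate is not yet valid: current time %s is before %s\n",
			now.UTC().Format(time.RFC3339), cert.NotBefore.UTC().Format(time.RFC3339))
	case now.After(cert.NotAfter):
		// This is the branch the kubelet log is hitting.
		fmt.Printf("certificate has expired: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	default:
		fmt.Printf("certificate valid until %s\n", cert.NotAfter.UTC().Format(time.RFC3339))
	}
}
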
Has your network provider started?"} Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.811129 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:02Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.906808 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.906859 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.906874 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.906894 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:02 crc kubenswrapper[4967]: I1121 15:36:02.906907 4967 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:02Z","lastTransitionTime":"2025-11-21T15:36:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.010464 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.010532 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.010544 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.010569 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.010584 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:03Z","lastTransitionTime":"2025-11-21T15:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.113774 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.113819 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.113830 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.113849 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.113860 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:03Z","lastTransitionTime":"2025-11-21T15:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.217274 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.217366 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.217383 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.217403 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.217415 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:03Z","lastTransitionTime":"2025-11-21T15:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.320361 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.320414 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.320423 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.320472 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.320483 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:03Z","lastTransitionTime":"2025-11-21T15:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.419118 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:36:03 crc kubenswrapper[4967]: E1121 15:36:03.419297 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:36:35.419266611 +0000 UTC m=+83.677787619 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.419416 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.419502 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:03 crc kubenswrapper[4967]: E1121 15:36:03.419632 4967 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 15:36:03 crc kubenswrapper[4967]: E1121 15:36:03.419724 4967 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 15:36:03 crc kubenswrapper[4967]: E1121 15:36:03.419732 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 15:36:35.419708473 +0000 UTC m=+83.678229481 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 15:36:03 crc kubenswrapper[4967]: E1121 15:36:03.419794 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 15:36:35.419785215 +0000 UTC m=+83.678306223 (durationBeforeRetry 32s). 
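
The "No retries permitted until ... (durationBeforeRetry 32s)" lines reflect a per-operation exponential backoff: each failure roughly doubles the wait before the volume operation may be retried, up to a cap. A sketch of that schedule, assuming an initial 500ms delay and a 2-minute cap; these constants are illustrative, though 500ms doubled six times does give the 32s seen here.

// Sketch only: a doubling retry delay with a ceiling, as implied by the
// durationBeforeRetry values in the log. Not kubelet's actual code.
package main

import (
	"fmt"
	"time"
)

func backoff(initial, cap time.Duration, failures int) time.Duration {
	d := initial
	for i := 0; i < failures; i++ {
		d *= 2
		if d > cap {
			return cap
		}
	}
	return d
}

func main() {
	for n := 0; n <= 7; n++ {
		fmt.Printf("after %d failures: retry in %s\n",
			n, backoff(500*time.Millisecond, 2*time.Minute, n))
	}
}
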
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.423527 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.423591 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.423609 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.423641 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.423657 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:03Z","lastTransitionTime":"2025-11-21T15:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.520244 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.520421 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:03 crc kubenswrapper[4967]: E1121 15:36:03.520524 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 15:36:03 crc kubenswrapper[4967]: E1121 15:36:03.520562 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 15:36:03 crc kubenswrapper[4967]: E1121 15:36:03.520576 4967 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:36:03 crc kubenswrapper[4967]: E1121 15:36:03.520579 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 15:36:03 crc kubenswrapper[4967]: E1121 15:36:03.520597 4967 
projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 15:36:03 crc kubenswrapper[4967]: E1121 15:36:03.520609 4967 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:36:03 crc kubenswrapper[4967]: E1121 15:36:03.520646 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 15:36:35.520628697 +0000 UTC m=+83.779149705 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:36:03 crc kubenswrapper[4967]: E1121 15:36:03.520666 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 15:36:35.520659378 +0000 UTC m=+83.779180386 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.526793 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.526884 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.526942 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.527045 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.527110 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:03Z","lastTransitionTime":"2025-11-21T15:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.535261 4967 util.go:30] "No sandbox for pod can be found. 
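
The "object ... not registered" failures are local cache misses, not API errors: volume setup reads ConfigMaps and Secrets through a kubelet-side registry that pod workers populate, and until a pod's references are registered, every source of its projected kube-api-access volume fails. A sketch of that gate with an illustrative map-based cache follows; the structure is an assumption, not the kubelet's actual manager.

// Sketch only: lookups fail until registration happens, producing the
// same error shape as the log above.
package main

import "fmt"

type objectCache struct {
	registered map[string]bool // key: namespace + "/" + name
}

func (c *objectCache) getConfigMap(namespace, name string) error {
	if !c.registered[namespace+"/"+name] {
		return fmt.Errorf("object %q/%q not registered", namespace, name)
	}
	return nil
}

func main() {
	cache := &objectCache{registered: map[string]bool{}}
	// Before the pod worker registers its references, every projected
	// source of the kube-api-access volume fails:
	for _, name := range []string{"kube-root-ca.crt", "openshift-service-ca.crt"} {
		if err := cache.getConfigMap("openshift-network-diagnostics", name); err != nil {
			fmt.Println("MountVolume.SetUp failed:", err)
		}
	}
}
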
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.535376 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:03 crc kubenswrapper[4967]: E1121 15:36:03.535423 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:03 crc kubenswrapper[4967]: E1121 15:36:03.535553 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.629915 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.629973 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.629985 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.630007 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.630019 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:03Z","lastTransitionTime":"2025-11-21T15:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.732764 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.732825 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.732837 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.732869 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.732882 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:03Z","lastTransitionTime":"2025-11-21T15:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.834908 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.834945 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.834954 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.834968 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.834977 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:03Z","lastTransitionTime":"2025-11-21T15:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.937082 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.937136 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.937148 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.937165 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:03 crc kubenswrapper[4967]: I1121 15:36:03.937179 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:03Z","lastTransitionTime":"2025-11-21T15:36:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.040220 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.040281 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.040299 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.040336 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.040348 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:04Z","lastTransitionTime":"2025-11-21T15:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.142784 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.142826 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.142836 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.142852 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.142862 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:04Z","lastTransitionTime":"2025-11-21T15:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.245418 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.245451 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.245460 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.245474 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.245484 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:04Z","lastTransitionTime":"2025-11-21T15:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.347598 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.347642 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.347655 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.347671 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.347682 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:04Z","lastTransitionTime":"2025-11-21T15:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.450564 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.450603 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.450615 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.450631 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.450641 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:04Z","lastTransitionTime":"2025-11-21T15:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.535853 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:04 crc kubenswrapper[4967]: E1121 15:36:04.535992 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.536122 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:04 crc kubenswrapper[4967]: E1121 15:36:04.536287 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.553472 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.553516 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.553527 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.553547 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.553557 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:04Z","lastTransitionTime":"2025-11-21T15:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.656297 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.656377 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.656392 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.656411 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.656429 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:04Z","lastTransitionTime":"2025-11-21T15:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.759043 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.759094 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.759103 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.759124 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.759138 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:04Z","lastTransitionTime":"2025-11-21T15:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.863028 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.863091 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.863105 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.863127 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.863142 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:04Z","lastTransitionTime":"2025-11-21T15:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.966590 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.966633 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.966643 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.966657 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:04 crc kubenswrapper[4967]: I1121 15:36:04.966666 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:04Z","lastTransitionTime":"2025-11-21T15:36:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.070223 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.070276 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.070285 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.070302 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.070337 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:05Z","lastTransitionTime":"2025-11-21T15:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.174000 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.174108 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.174137 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.174259 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.174291 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:05Z","lastTransitionTime":"2025-11-21T15:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.277057 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.277105 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.277118 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.277134 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.277145 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:05Z","lastTransitionTime":"2025-11-21T15:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.379567 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.379618 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.379634 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.379661 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.379682 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:05Z","lastTransitionTime":"2025-11-21T15:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.481557 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.481608 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.481618 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.481631 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.481642 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:05Z","lastTransitionTime":"2025-11-21T15:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.535449 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.535624 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 21 15:36:05 crc kubenswrapper[4967]: E1121 15:36:05.535708 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 21 15:36:05 crc kubenswrapper[4967]: E1121 15:36:05.535831 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.584064 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.584104 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.584112 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.584126 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.584135 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:05Z","lastTransitionTime":"2025-11-21T15:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.687350 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.687392 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.687401 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.687419 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.687428 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:05Z","lastTransitionTime":"2025-11-21T15:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.791062 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.791102 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.791112 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.791131 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.791144 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:05Z","lastTransitionTime":"2025-11-21T15:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.893287 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.893340 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.893350 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.893366 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.893375 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:05Z","lastTransitionTime":"2025-11-21T15:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.995656 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.995966 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.996031 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.996108 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:05 crc kubenswrapper[4967]: I1121 15:36:05.996179 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:05Z","lastTransitionTime":"2025-11-21T15:36:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.099074 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.099119 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.099129 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.099146 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.099157 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:06Z","lastTransitionTime":"2025-11-21T15:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.201355 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.201391 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.201403 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.201419 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.201429 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:06Z","lastTransitionTime":"2025-11-21T15:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.303780 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.303811 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.303820 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.303833 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.303842 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:06Z","lastTransitionTime":"2025-11-21T15:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.407130 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.407167 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.407177 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.407192 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.407201 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:06Z","lastTransitionTime":"2025-11-21T15:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.510140 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.510192 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.510204 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.510230 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.510242 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:06Z","lastTransitionTime":"2025-11-21T15:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.535662 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.535720 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv"
Nov 21 15:36:06 crc kubenswrapper[4967]: E1121 15:36:06.535960 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 21 15:36:06 crc kubenswrapper[4967]: E1121 15:36:06.536033 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.536859 4967 scope.go:117] "RemoveContainer" containerID="7b6ade107627a4d50ba3934f012d8fddb94dc7d8c989985f5938dd2d1977fa8e"
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.553889 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs\") pod \"network-metrics-daemon-kj7qv\" (UID: \"e413228d-eaa3-45fb-8adf-35e0054bf53c\") " pod="openshift-multus/network-metrics-daemon-kj7qv"
Nov 21 15:36:06 crc kubenswrapper[4967]: E1121 15:36:06.554485 4967 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 21 15:36:06 crc kubenswrapper[4967]: E1121 15:36:06.554559 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs podName:e413228d-eaa3-45fb-8adf-35e0054bf53c nodeName:}" failed. No retries permitted until 2025-11-21 15:36:22.554542081 +0000 UTC m=+70.813063089 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs") pod "network-metrics-daemon-kj7qv" (UID: "e413228d-eaa3-45fb-8adf-35e0054bf53c") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.557055 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68a41149-faa4-4822-b4d2-09d2461d2078\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa5cf8d5f0aa74c72abd3fe2c01372b3089066842b87cf74df4f9accde84fcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1812d7c4dec38deedb6295479db0a8c84ddd96a8e41d191b381582c01318c3ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67676d67141387f36bdc8d39929bc2992c37ec12d9b3c1553f8a043e30a39d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.567498 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}]
,\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.586427 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.600039 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.612088 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.612134 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.612145 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.612160 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.612178 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:06Z","lastTransitionTime":"2025-11-21T15:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.619003 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc 
kubenswrapper[4967]: I1121 15:36:06.631652 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.646091 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5169
27adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\
\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.657150 4967 
status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.672599 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.689795 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.710518 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.714557 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.714588 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.714596 4967 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.714608 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.714617 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:06Z","lastTransitionTime":"2025-11-21T15:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.730662 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b6ade107627a4d50ba3934f012d8fddb94dc7d8
c989985f5938dd2d1977fa8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b6ade107627a4d50ba3934f012d8fddb94dc7d8c989985f5938dd2d1977fa8e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:35:52Z\\\",\\\"message\\\":\\\"e_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"e4e4203e-87c7-4024-930a-5d6bdfe2bdde\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.254\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1121 15:35:52.340996 6528 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zm492_openshift-ovn-kubernetes(eeb9277d-9a26-4665-a01c-9ed1c379e8dd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.744251 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.756079 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.769766 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.783615 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.796022 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 
15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.816500 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.816570 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.816580 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.816596 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.816609 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:06Z","lastTransitionTime":"2025-11-21T15:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.895459 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovnkube-controller/1.log" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.897843 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerStarted","Data":"463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc"} Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.897973 4967 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.910653 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.919027 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.919081 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.919096 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.919115 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.919127 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:06Z","lastTransitionTime":"2025-11-21T15:36:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.923466 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.938058 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reaso
n\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.949516 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.967064 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:06 crc kubenswrapper[4967]: I1121 15:36:06.985387 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:06Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.003650 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:07Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.016059 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:07Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.021166 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.021205 4967 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.021213 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.021227 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.021236 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:07Z","lastTransitionTime":"2025-11-21T15:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.031077 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"nam
e\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:07Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.057349 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://463a5373910df87d43b3bfb611e434283cccb804
9417bf628b6bf29007178abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b6ade107627a4d50ba3934f012d8fddb94dc7d8c989985f5938dd2d1977fa8e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:35:52Z\\\",\\\"message\\\":\\\"e_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"e4e4203e-87c7-4024-930a-5d6bdfe2bdde\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.254\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1121 15:35:52.340996 6528 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:36:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:07Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.069120 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:07Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.081373 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:07Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.092155 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:07Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.106131 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:07Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.124042 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.124090 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.124100 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.124118 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.124104 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:07Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.124129 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:07Z","lastTransitionTime":"2025-11-21T15:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.144786 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68a41149-faa4-4822-b4d2-09d2461d2078\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa5cf8d5f0aa74c72abd3fe2c01372b3089066842b87cf74df4f9accde84fcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1812d7c4dec38deedb6295479db0a8c84ddd96a8e41d191b381582c01318c3ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a93800
66b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67676d67141387f36bdc8d39929bc2992c37ec12d9b3c1553f8a043e30a39d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:07Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.162920 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:07Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.214067 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.227330 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.227374 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.227384 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.227399 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.227409 4967 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:07Z","lastTransitionTime":"2025-11-21T15:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.330123 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.330178 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.330189 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.330213 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.330229 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:07Z","lastTransitionTime":"2025-11-21T15:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.433877 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.433915 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.433924 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.433944 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.433953 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:07Z","lastTransitionTime":"2025-11-21T15:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.535274 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.535294 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:07 crc kubenswrapper[4967]: E1121 15:36:07.535450 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:07 crc kubenswrapper[4967]: E1121 15:36:07.535715 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.537087 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.537287 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.537580 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.537821 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.538060 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:07Z","lastTransitionTime":"2025-11-21T15:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.641001 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.641386 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.641480 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.641590 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.641656 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:07Z","lastTransitionTime":"2025-11-21T15:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.744977 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.745024 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.745037 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.745060 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.745075 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:07Z","lastTransitionTime":"2025-11-21T15:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.849174 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.849251 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.849272 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.849303 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.849354 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:07Z","lastTransitionTime":"2025-11-21T15:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.903581 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovnkube-controller/2.log" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.904531 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovnkube-controller/1.log" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.906954 4967 generic.go:334] "Generic (PLEG): container finished" podID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerID="463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc" exitCode=1 Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.907011 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerDied","Data":"463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc"} Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.907060 4967 scope.go:117] "RemoveContainer" containerID="7b6ade107627a4d50ba3934f012d8fddb94dc7d8c989985f5938dd2d1977fa8e" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.907683 4967 scope.go:117] "RemoveContainer" containerID="463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc" Nov 21 15:36:07 crc kubenswrapper[4967]: E1121 15:36:07.907863 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-zm492_openshift-ovn-kubernetes(eeb9277d-9a26-4665-a01c-9ed1c379e8dd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.921648 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:07Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.942857 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b6ade107627a4d50ba3934f012d8fddb94dc7d8c989985f5938dd2d1977fa8e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:35:52Z\\\",\\\"message\\\":\\\"e_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"e4e4203e-87c7-4024-930a-5d6bdfe2bdde\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.254\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1121 15:35:52.340996 6528 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:07Z\\\",\\\"message\\\":\\\"ces.lbConfig(nil)\\\\nI1121 15:36:07.443521 6681 services_controller.go:445] Built service openshift-kube-controller-manager-operator/metrics LB template configs for network=default: []services.lbConfig(nil)\\\\nI1121 15:36:07.443552 6681 services_controller.go:451] Built service openshift-kube-controller-manager-operator/metrics cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-controller-manager-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-controller-manager-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.219\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1121 15:36:07.443426 6681 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 
0x1fc\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:36:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1
d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:07Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.952620 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.952688 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.952704 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.952729 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.952748 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:07Z","lastTransitionTime":"2025-11-21T15:36:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.960702 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:07Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.976841 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:07Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:07 crc kubenswrapper[4967]: I1121 15:36:07.990776 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:07Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.003681 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.016198 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.029855 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"68a41149-faa4-4822-b4d2-09d2461d2078\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa5cf8d5f0aa74c72abd3fe2c01372b3089066842b87cf74df4f9accde84fcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1812d7c4dec38deedb6295479db0a8c84ddd96a8e41d191b381582c01318c3ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67676d67141387f36bdc8d39929bc2992c37ec12d9b3c1553f8a043e30a39d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.044379 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}]
,\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.055412 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.055492 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.055503 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.055526 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.055540 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:08Z","lastTransitionTime":"2025-11-21T15:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.060028 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.074967 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.089296 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.102620 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.118977 4967 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.131279 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.146588 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.157908 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.157949 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.157959 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.157981 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.157992 4967 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:08Z","lastTransitionTime":"2025-11-21T15:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.162425 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.261090 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.261195 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.261211 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.261239 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.261255 4967 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:08Z","lastTransitionTime":"2025-11-21T15:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.365134 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.365197 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.365210 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.365233 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.365246 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:08Z","lastTransitionTime":"2025-11-21T15:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.468193 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.468241 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.468250 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.468268 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.468277 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:08Z","lastTransitionTime":"2025-11-21T15:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.507877 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.507930 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.507939 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.507956 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.507967 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:08Z","lastTransitionTime":"2025-11-21T15:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:08 crc kubenswrapper[4967]: E1121 15:36:08.520571 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.525532 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.525575 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.525585 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.525602 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.525613 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:08Z","lastTransitionTime":"2025-11-21T15:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.535947 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.536066 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:08 crc kubenswrapper[4967]: E1121 15:36:08.536167 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:08 crc kubenswrapper[4967]: E1121 15:36:08.536324 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:08 crc kubenswrapper[4967]: E1121 15:36:08.537826 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.543099 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.543136 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.543150 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.543168 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.543184 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:08Z","lastTransitionTime":"2025-11-21T15:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:08 crc kubenswrapper[4967]: E1121 15:36:08.556957 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.561181 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.561231 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.561257 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.561290 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.561352 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:08Z","lastTransitionTime":"2025-11-21T15:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:08 crc kubenswrapper[4967]: E1121 15:36:08.576849 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.580976 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.581027 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.581040 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.581061 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.581073 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:08Z","lastTransitionTime":"2025-11-21T15:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:08 crc kubenswrapper[4967]: E1121 15:36:08.596282 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:08Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: E1121 15:36:08.596502 4967 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.598164 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.598221 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.598232 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.598256 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.598270 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:08Z","lastTransitionTime":"2025-11-21T15:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.701105 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.701183 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.701199 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.701225 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.701244 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:08Z","lastTransitionTime":"2025-11-21T15:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.803650 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.803716 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.803730 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.803761 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.803777 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:08Z","lastTransitionTime":"2025-11-21T15:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.906807 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.906843 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.906851 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.906865 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.906876 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:08Z","lastTransitionTime":"2025-11-21T15:36:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.911822 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovnkube-controller/2.log" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.914944 4967 scope.go:117] "RemoveContainer" containerID="463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc" Nov 21 15:36:08 crc kubenswrapper[4967]: E1121 15:36:08.915086 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-zm492_openshift-ovn-kubernetes(eeb9277d-9a26-4665-a01c-9ed1c379e8dd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.928881 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.942598 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.955874 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.971927 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.985397 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:08 crc kubenswrapper[4967]: I1121 15:36:08.996525 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:08Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.009827 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.009876 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.009892 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.009911 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.009926 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:09Z","lastTransitionTime":"2025-11-21T15:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.010469 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68a41149-faa4-4822-b4d2-09d2461d2078\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa5cf8d5f0aa74c72abd3fe2c01372b3089066842b87cf74df4f9accde84fcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1812d7c4dec38deedb6295479db0a8c84ddd96a8e41d191b381582c01318c3ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67676d67141387f36bdc8d39929bc2992c37ec12d9b3c1553f8a043e30a39d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kuber
netes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:09Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.024797 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:09Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.037395 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:09Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.050705 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:09Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.061827 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:09Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.074787 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:09Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.090924 4967 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:09Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.105211 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:09Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.112444 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.112483 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.112492 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.112513 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.112525 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:09Z","lastTransitionTime":"2025-11-21T15:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.124890 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\
"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:09Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.146983 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:07Z\\\",\\\"message\\\":\\\"ces.lbConfig(nil)\\\\nI1121 15:36:07.443521 6681 services_controller.go:445] Built service openshift-kube-controller-manager-operator/metrics LB template configs for network=default: []services.lbConfig(nil)\\\\nI1121 15:36:07.443552 6681 services_controller.go:451] Built service openshift-kube-controller-manager-operator/metrics cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-controller-manager-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-controller-manager-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.219\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1121 15:36:07.443426 6681 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fc\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:36:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zm492_openshift-ovn-kubernetes(eeb9277d-9a26-4665-a01c-9ed1c379e8dd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:09Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.160438 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:09Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.214815 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.214856 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.214866 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.214889 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.214901 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:09Z","lastTransitionTime":"2025-11-21T15:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.317818 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.317881 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.317891 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.317911 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.317923 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:09Z","lastTransitionTime":"2025-11-21T15:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.420237 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.420286 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.420298 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.420346 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.420357 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:09Z","lastTransitionTime":"2025-11-21T15:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.522533 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.522590 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.522602 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.522624 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.522637 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:09Z","lastTransitionTime":"2025-11-21T15:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.535717 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.535773 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:09 crc kubenswrapper[4967]: E1121 15:36:09.535860 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:09 crc kubenswrapper[4967]: E1121 15:36:09.535995 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.625272 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.625338 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.625357 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.625374 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.625384 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:09Z","lastTransitionTime":"2025-11-21T15:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.728089 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.728160 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.728184 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.728216 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.728241 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:09Z","lastTransitionTime":"2025-11-21T15:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.830562 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.830620 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.830633 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.830652 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.830664 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:09Z","lastTransitionTime":"2025-11-21T15:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.933693 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.933740 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.933752 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.933769 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:09 crc kubenswrapper[4967]: I1121 15:36:09.933782 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:09Z","lastTransitionTime":"2025-11-21T15:36:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.036904 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.036974 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.036991 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.037019 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.037039 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:10Z","lastTransitionTime":"2025-11-21T15:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.140364 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.140407 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.140417 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.140432 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.140442 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:10Z","lastTransitionTime":"2025-11-21T15:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.243098 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.243162 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.243176 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.243201 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.243223 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:10Z","lastTransitionTime":"2025-11-21T15:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.346820 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.346875 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.346885 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.346904 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.346915 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:10Z","lastTransitionTime":"2025-11-21T15:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.472375 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.472742 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.472837 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.472909 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.472972 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:10Z","lastTransitionTime":"2025-11-21T15:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.535521 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.536347 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:10 crc kubenswrapper[4967]: E1121 15:36:10.536477 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:10 crc kubenswrapper[4967]: E1121 15:36:10.536657 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.575639 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.575726 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.575740 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.575763 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.575776 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:10Z","lastTransitionTime":"2025-11-21T15:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.679401 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.679799 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.679899 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.680005 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.680106 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:10Z","lastTransitionTime":"2025-11-21T15:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.783006 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.783364 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.783475 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.783547 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.783625 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:10Z","lastTransitionTime":"2025-11-21T15:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.886424 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.886467 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.886480 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.886498 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.886508 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:10Z","lastTransitionTime":"2025-11-21T15:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.989115 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.989167 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.989178 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.989197 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:10 crc kubenswrapper[4967]: I1121 15:36:10.989210 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:10Z","lastTransitionTime":"2025-11-21T15:36:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.091889 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.091926 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.091934 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.091949 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.091958 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:11Z","lastTransitionTime":"2025-11-21T15:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.194609 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.194684 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.194697 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.194718 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.194733 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:11Z","lastTransitionTime":"2025-11-21T15:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.297633 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.297675 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.297688 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.297707 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.297718 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:11Z","lastTransitionTime":"2025-11-21T15:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.399956 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.400005 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.400014 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.400032 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.400042 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:11Z","lastTransitionTime":"2025-11-21T15:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.502064 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.502099 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.502109 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.502124 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.502132 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:11Z","lastTransitionTime":"2025-11-21T15:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.535277 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 21 15:36:11 crc kubenswrapper[4967]: E1121 15:36:11.535472 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.535703 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
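Every one of these NotReady heartbeats, and the "No sandbox for pod can be found" / "Error syncing pod" pairs, trace back to the same condition: nothing has yet written a CNI network configuration into /etc/kubernetes/cni/net.d/, so the container runtime keeps reporting NetworkReady=false and sandbox creation for networked pods stays blocked. A minimal Go sketch of the gist of that readiness test (the real check lives in the container runtime's CNI handling; the directory and extensions here just mirror the log message):

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    // cniConfPresent reports whether dir contains at least one CNI network
    // configuration file (*.conf, *.conflist, *.json) -- the condition that,
    // once the network plugin writes its config, lets the runtime report
    // NetworkReady=true.
    func cniConfPresent(dir string) (bool, error) {
    	entries, err := os.ReadDir(dir)
    	if err != nil {
    		return false, err
    	}
    	for _, e := range entries {
    		if e.IsDir() {
    			continue
    		}
    		switch filepath.Ext(e.Name()) {
    		case ".conf", ".conflist", ".json":
    			return true, nil
    		}
    	}
    	return false, nil
    }

    func main() {
    	ok, err := cniConfPresent("/etc/kubernetes/cni/net.d")
    	fmt.Println(ok, err) // false until the network plugin drops its config
    }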
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:11 crc kubenswrapper[4967]: E1121 15:36:11.535767 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.604012 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.604050 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.604059 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.604074 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.604084 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:11Z","lastTransitionTime":"2025-11-21T15:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.706892 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.706972 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.706987 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.707007 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.707035 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:11Z","lastTransitionTime":"2025-11-21T15:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.810300 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.810369 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.810381 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.810414 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.810424 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:11Z","lastTransitionTime":"2025-11-21T15:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.912476 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.912536 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.912551 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.912575 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:11 crc kubenswrapper[4967]: I1121 15:36:11.912589 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:11Z","lastTransitionTime":"2025-11-21T15:36:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.014515 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.014565 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.014574 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.014592 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.014601 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:12Z","lastTransitionTime":"2025-11-21T15:36:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
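The CNI gap is not the only fault in this window: the status_manager.go:875 entries that follow show every pod-status patch being rejected because the network-node-identity webhook at https://127.0.0.1:9743 serves a certificate that expired on 2025-08-24T17:21:41Z, producing "x509: certificate has expired or is not yet valid" on each TLS handshake. A minimal Go sketch of that validity-window test (the file path is hypothetical; the actual serving cert lives in the webhook pod's webhook-cert volume):

    package main

    import (
    	"crypto/x509"
    	"encoding/pem"
    	"fmt"
    	"os"
    	"time"
    )

    func main() {
    	// Hypothetical PEM file standing in for the webhook's serving cert.
    	pemBytes, err := os.ReadFile("webhook-serving.crt")
    	if err != nil {
    		panic(err)
    	}
    	block, _ := pem.Decode(pemBytes)
    	if block == nil {
    		panic("no PEM block found")
    	}
    	cert, err := x509.ParseCertificate(block.Bytes)
    	if err != nil {
    		panic(err)
    	}
    	// The same NotBefore/NotAfter window check that the TLS handshake
    	// applies; outside the window it reports the x509 error seen below.
    	now := time.Now()
    	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
    		fmt.Printf("cert invalid at %s: valid %s - %s\n",
    			now.Format(time.RFC3339),
    			cert.NotBefore.Format(time.RFC3339),
    			cert.NotAfter.Format(time.RFC3339))
    	}
    }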
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.118359 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.118419 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.118431 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.118451 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.118462 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:12Z","lastTransitionTime":"2025-11-21T15:36:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.220852 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.220914 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.220928 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.220954 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.220972 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:12Z","lastTransitionTime":"2025-11-21T15:36:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.323801 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.323855 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.323868 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.323893 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.323909 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:12Z","lastTransitionTime":"2025-11-21T15:36:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.425977 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.426031 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.426042 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.426057 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.426069 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:12Z","lastTransitionTime":"2025-11-21T15:36:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.528106 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.528663 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.528671 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.528689 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.528700 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:12Z","lastTransitionTime":"2025-11-21T15:36:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.535961 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.535961 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv"
Nov 21 15:36:12 crc kubenswrapper[4967]: E1121 15:36:12.536121 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:12 crc kubenswrapper[4967]: E1121 15:36:12.536155 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.554027 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:12Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.579902 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\
":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:07Z\\\",\\\"message\\\":\\\"ces.lbConfig(nil)\\\\nI1121 15:36:07.443521 6681 services_controller.go:445] Built service openshift-kube-controller-manager-operator/metrics LB template configs for network=default: []services.lbConfig(nil)\\\\nI1121 15:36:07.443552 6681 services_controller.go:451] Built service openshift-kube-controller-manager-operator/metrics cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-controller-manager-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-controller-manager-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.219\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1121 15:36:07.443426 6681 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could 
not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fc\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:36:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-zm492_openshift-ovn-kubernetes(eeb9277d-9a26-4665-a01c-9ed1c379e8dd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var
/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:12Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.598434 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:12Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.613022 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:12Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.630599 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:12Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.631897 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.631960 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.631976 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.631996 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.632007 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:12Z","lastTransitionTime":"2025-11-21T15:36:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.649372 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:12Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.663465 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadO
nly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:12Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.680999 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68a41149-faa4-4822-b4d2-09d2461d2078\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa5cf8d5f0aa74c72abd3fe2c01372b3089066842b87cf74df4f9accde84fcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1812d7c4dec38deedb6295479db0a8c84ddd96a8e41d191b381582c01318c3ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67676d67141387f36bdc8d39929bc2992c37ec12d9b3c1553f8a043e30a39d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp
-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:12Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.692780 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:12Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.706450 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:12Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.720397 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:12Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.731097 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:12Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.735429 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.735478 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.735488 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.735505 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:12 crc 
kubenswrapper[4967]: I1121 15:36:12.735518 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:12Z","lastTransitionTime":"2025-11-21T15:36:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.743225 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:12Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.758724 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed814
51ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTi
me\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:12Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.770483 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:12Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.784923 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:12Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.800701 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:12Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.837898 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.837952 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.837963 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.837982 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.837994 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:12Z","lastTransitionTime":"2025-11-21T15:36:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.940461 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.940534 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.940552 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.940584 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:12 crc kubenswrapper[4967]: I1121 15:36:12.940603 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:12Z","lastTransitionTime":"2025-11-21T15:36:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.042738 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.042789 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.042803 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.042823 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.042837 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:13Z","lastTransitionTime":"2025-11-21T15:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.146586 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.146630 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.146638 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.146654 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.146664 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:13Z","lastTransitionTime":"2025-11-21T15:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.249618 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.249678 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.249689 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.249711 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.249723 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:13Z","lastTransitionTime":"2025-11-21T15:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.352694 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.352781 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.352800 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.352829 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.352850 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:13Z","lastTransitionTime":"2025-11-21T15:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.456121 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.456185 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.456198 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.456222 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.456240 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:13Z","lastTransitionTime":"2025-11-21T15:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.535290 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.535405 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:13 crc kubenswrapper[4967]: E1121 15:36:13.535532 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:13 crc kubenswrapper[4967]: E1121 15:36:13.535738 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.559550 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.559630 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.559648 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.559679 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.559700 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:13Z","lastTransitionTime":"2025-11-21T15:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.663627 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.663680 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.663694 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.663714 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.663726 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:13Z","lastTransitionTime":"2025-11-21T15:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.766716 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.766806 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.766822 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.766847 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.766864 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:13Z","lastTransitionTime":"2025-11-21T15:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.871554 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.871628 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.871654 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.871689 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.871710 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:13Z","lastTransitionTime":"2025-11-21T15:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.974889 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.974937 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.974946 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.974964 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:13 crc kubenswrapper[4967]: I1121 15:36:13.974975 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:13Z","lastTransitionTime":"2025-11-21T15:36:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.077889 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.077937 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.077953 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.077975 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.077986 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:14Z","lastTransitionTime":"2025-11-21T15:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.181416 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.181460 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.181468 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.181484 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.181495 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:14Z","lastTransitionTime":"2025-11-21T15:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.291478 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.291565 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.291595 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.291635 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.291666 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:14Z","lastTransitionTime":"2025-11-21T15:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.395245 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.395368 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.395392 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.395419 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.395436 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:14Z","lastTransitionTime":"2025-11-21T15:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.498509 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.498592 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.498611 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.498645 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.498665 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:14Z","lastTransitionTime":"2025-11-21T15:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.536186 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.536291 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:14 crc kubenswrapper[4967]: E1121 15:36:14.536438 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:14 crc kubenswrapper[4967]: E1121 15:36:14.536571 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.602347 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.602398 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.602412 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.602432 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.602444 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:14Z","lastTransitionTime":"2025-11-21T15:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.706650 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.706762 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.706787 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.706820 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.706846 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:14Z","lastTransitionTime":"2025-11-21T15:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.811245 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.811350 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.811376 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.811408 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.811431 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:14Z","lastTransitionTime":"2025-11-21T15:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.913879 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.913919 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.913951 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.913968 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:14 crc kubenswrapper[4967]: I1121 15:36:14.913977 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:14Z","lastTransitionTime":"2025-11-21T15:36:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.017115 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.017179 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.017195 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.017218 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.017234 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:15Z","lastTransitionTime":"2025-11-21T15:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.120228 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.120276 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.120286 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.120304 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.120343 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:15Z","lastTransitionTime":"2025-11-21T15:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.224108 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.224198 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.224238 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.224279 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.224305 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:15Z","lastTransitionTime":"2025-11-21T15:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.328255 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.328356 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.328377 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.328407 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.328426 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:15Z","lastTransitionTime":"2025-11-21T15:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.432038 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.432126 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.432144 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.432172 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.432196 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:15Z","lastTransitionTime":"2025-11-21T15:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.535396 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.535434 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:15 crc kubenswrapper[4967]: E1121 15:36:15.535563 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:15 crc kubenswrapper[4967]: E1121 15:36:15.535708 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.535924 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.535975 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.535988 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.536007 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.536021 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:15Z","lastTransitionTime":"2025-11-21T15:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.638601 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.638645 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.638657 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.638675 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.638686 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:15Z","lastTransitionTime":"2025-11-21T15:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.741175 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.741255 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.741282 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.741336 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.741351 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:15Z","lastTransitionTime":"2025-11-21T15:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.843873 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.843943 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.843956 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.843984 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.844000 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:15Z","lastTransitionTime":"2025-11-21T15:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.946762 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.946822 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.946842 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.946867 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:15 crc kubenswrapper[4967]: I1121 15:36:15.946881 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:15Z","lastTransitionTime":"2025-11-21T15:36:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.049121 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.049173 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.049184 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.049204 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.049215 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:16Z","lastTransitionTime":"2025-11-21T15:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.152634 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.152696 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.152710 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.152729 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.152742 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:16Z","lastTransitionTime":"2025-11-21T15:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.254910 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.254958 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.254969 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.254987 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.255001 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:16Z","lastTransitionTime":"2025-11-21T15:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.358461 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.358514 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.358523 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.358541 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.358552 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:16Z","lastTransitionTime":"2025-11-21T15:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.462501 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.462582 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.462594 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.462617 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.462629 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:16Z","lastTransitionTime":"2025-11-21T15:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.536260 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.536469 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:16 crc kubenswrapper[4967]: E1121 15:36:16.536639 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:16 crc kubenswrapper[4967]: E1121 15:36:16.536792 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.565487 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.565548 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.565565 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.565587 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.565601 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:16Z","lastTransitionTime":"2025-11-21T15:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.668273 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.668345 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.668361 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.668384 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.668399 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:16Z","lastTransitionTime":"2025-11-21T15:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.772005 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.772046 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.772055 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.772072 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.772083 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:16Z","lastTransitionTime":"2025-11-21T15:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.875443 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.875554 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.875614 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.875637 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.875651 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:16Z","lastTransitionTime":"2025-11-21T15:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.978105 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.978143 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.978152 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.978167 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:16 crc kubenswrapper[4967]: I1121 15:36:16.978176 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:16Z","lastTransitionTime":"2025-11-21T15:36:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.079903 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.079939 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.079948 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.079967 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.079976 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:17Z","lastTransitionTime":"2025-11-21T15:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.181603 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.181634 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.181643 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.181657 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.181668 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:17Z","lastTransitionTime":"2025-11-21T15:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.284234 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.284286 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.284296 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.284328 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.284339 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:17Z","lastTransitionTime":"2025-11-21T15:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.387336 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.387398 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.387411 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.387441 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.387457 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:17Z","lastTransitionTime":"2025-11-21T15:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.490384 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.490424 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.490435 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.490453 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.490464 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:17Z","lastTransitionTime":"2025-11-21T15:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.535223 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.535303 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:17 crc kubenswrapper[4967]: E1121 15:36:17.535402 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:17 crc kubenswrapper[4967]: E1121 15:36:17.535475 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.593461 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.593526 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.593539 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.593562 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.593576 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:17Z","lastTransitionTime":"2025-11-21T15:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.696625 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.696674 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.696687 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.696752 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.696769 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:17Z","lastTransitionTime":"2025-11-21T15:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.799472 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.799531 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.799542 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.799564 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.799577 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:17Z","lastTransitionTime":"2025-11-21T15:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.901859 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.901910 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.901921 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.901941 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:17 crc kubenswrapper[4967]: I1121 15:36:17.901956 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:17Z","lastTransitionTime":"2025-11-21T15:36:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.003832 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.003880 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.003893 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.003910 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.003919 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:18Z","lastTransitionTime":"2025-11-21T15:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.106518 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.106622 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.106642 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.106672 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.106691 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:18Z","lastTransitionTime":"2025-11-21T15:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.209207 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.209305 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.209337 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.209364 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.209377 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:18Z","lastTransitionTime":"2025-11-21T15:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.311688 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.311766 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.311776 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.311794 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.312047 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:18Z","lastTransitionTime":"2025-11-21T15:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.414025 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.414080 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.414091 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.414106 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.414115 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:18Z","lastTransitionTime":"2025-11-21T15:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.516723 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.516774 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.516784 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.516802 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.516815 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:18Z","lastTransitionTime":"2025-11-21T15:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.537931 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:18 crc kubenswrapper[4967]: E1121 15:36:18.538056 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.538250 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:18 crc kubenswrapper[4967]: E1121 15:36:18.538338 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.618749 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.618782 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.618792 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.618810 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.618822 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:18Z","lastTransitionTime":"2025-11-21T15:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.721784 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.721847 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.721861 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.721882 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.721893 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:18Z","lastTransitionTime":"2025-11-21T15:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.824516 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.824554 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.824565 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.824580 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.824589 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:18Z","lastTransitionTime":"2025-11-21T15:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.873626 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.873680 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.873693 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.873713 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.873726 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:18Z","lastTransitionTime":"2025-11-21T15:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:18 crc kubenswrapper[4967]: E1121 15:36:18.887911 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:18Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.891972 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.892032 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.892040 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.892055 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.892066 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:18Z","lastTransitionTime":"2025-11-21T15:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:18 crc kubenswrapper[4967]: E1121 15:36:18.903283 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:18Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.908123 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.908173 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.908186 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.908209 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.908222 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:18Z","lastTransitionTime":"2025-11-21T15:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:18 crc kubenswrapper[4967]: E1121 15:36:18.922384 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:18Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.926975 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.927026 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.927041 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.927063 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.927073 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:18Z","lastTransitionTime":"2025-11-21T15:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:18 crc kubenswrapper[4967]: E1121 15:36:18.940122 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:18Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.944026 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.944070 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.944083 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.944102 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.944116 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:18Z","lastTransitionTime":"2025-11-21T15:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:18 crc kubenswrapper[4967]: E1121 15:36:18.955895 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:18Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:18Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:18 crc kubenswrapper[4967]: E1121 15:36:18.956015 4967 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.958118 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.958150 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.958159 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.958175 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:18 crc kubenswrapper[4967]: I1121 15:36:18.958188 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:18Z","lastTransitionTime":"2025-11-21T15:36:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.067886 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.067935 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.067946 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.067968 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.067981 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:19Z","lastTransitionTime":"2025-11-21T15:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.172284 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.172854 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.172894 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.172976 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.173005 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:19Z","lastTransitionTime":"2025-11-21T15:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.275535 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.275592 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.275609 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.275636 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.275654 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:19Z","lastTransitionTime":"2025-11-21T15:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.378995 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.379077 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.379103 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.379138 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.379169 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:19Z","lastTransitionTime":"2025-11-21T15:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.485560 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.485605 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.485615 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.485632 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.485642 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:19Z","lastTransitionTime":"2025-11-21T15:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.536088 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:19 crc kubenswrapper[4967]: E1121 15:36:19.536256 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.536390 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:19 crc kubenswrapper[4967]: E1121 15:36:19.536461 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.587879 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.587924 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.587934 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.587952 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.587962 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:19Z","lastTransitionTime":"2025-11-21T15:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.690827 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.690906 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.690922 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.690956 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.690970 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:19Z","lastTransitionTime":"2025-11-21T15:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.793799 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.793865 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.793874 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.793890 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.793908 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:19Z","lastTransitionTime":"2025-11-21T15:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.896445 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.896497 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.896513 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.896532 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.896547 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:19Z","lastTransitionTime":"2025-11-21T15:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.999177 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.999247 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.999264 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.999665 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:19 crc kubenswrapper[4967]: I1121 15:36:19.999729 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:19Z","lastTransitionTime":"2025-11-21T15:36:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.103422 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.103487 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.103501 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.103523 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.103539 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:20Z","lastTransitionTime":"2025-11-21T15:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.207770 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.207813 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.207827 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.207850 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.207865 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:20Z","lastTransitionTime":"2025-11-21T15:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.311402 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.311458 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.311472 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.311494 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.311508 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:20Z","lastTransitionTime":"2025-11-21T15:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.413962 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.414011 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.414023 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.414042 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.414054 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:20Z","lastTransitionTime":"2025-11-21T15:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.516149 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.516196 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.516230 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.516249 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.516259 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:20Z","lastTransitionTime":"2025-11-21T15:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.535541 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:20 crc kubenswrapper[4967]: E1121 15:36:20.535687 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.535821 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:20 crc kubenswrapper[4967]: E1121 15:36:20.536538 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.536888 4967 scope.go:117] "RemoveContainer" containerID="463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc" Nov 21 15:36:20 crc kubenswrapper[4967]: E1121 15:36:20.537275 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-zm492_openshift-ovn-kubernetes(eeb9277d-9a26-4665-a01c-9ed1c379e8dd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.618233 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.618264 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.618273 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.618287 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.618297 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:20Z","lastTransitionTime":"2025-11-21T15:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.720291 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.720350 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.720361 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.720378 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.720389 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:20Z","lastTransitionTime":"2025-11-21T15:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.822814 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.822859 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.822869 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.822886 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.822896 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:20Z","lastTransitionTime":"2025-11-21T15:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.925427 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.925466 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.925474 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.925489 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:20 crc kubenswrapper[4967]: I1121 15:36:20.925499 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:20Z","lastTransitionTime":"2025-11-21T15:36:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.031647 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.032283 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.032327 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.032352 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.032368 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:21Z","lastTransitionTime":"2025-11-21T15:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.136056 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.136101 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.136113 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.136135 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.136162 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:21Z","lastTransitionTime":"2025-11-21T15:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.239179 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.239221 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.239233 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.239253 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.239264 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:21Z","lastTransitionTime":"2025-11-21T15:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.342882 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.342978 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.343000 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.343028 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.343045 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:21Z","lastTransitionTime":"2025-11-21T15:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.446044 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.446096 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.446108 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.446147 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.446160 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:21Z","lastTransitionTime":"2025-11-21T15:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.536028 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.536107 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:21 crc kubenswrapper[4967]: E1121 15:36:21.536199 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:21 crc kubenswrapper[4967]: E1121 15:36:21.536360 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.549617 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.549672 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.549686 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.549709 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.549726 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:21Z","lastTransitionTime":"2025-11-21T15:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.652822 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.652861 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.652872 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.652889 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.652900 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:21Z","lastTransitionTime":"2025-11-21T15:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.755108 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.755159 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.755172 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.755194 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.755207 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:21Z","lastTransitionTime":"2025-11-21T15:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.858608 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.858675 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.858690 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.858719 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.858736 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:21Z","lastTransitionTime":"2025-11-21T15:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.961482 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.961534 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.961544 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.961564 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:21 crc kubenswrapper[4967]: I1121 15:36:21.961580 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:21Z","lastTransitionTime":"2025-11-21T15:36:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.064469 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.064522 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.064532 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.064550 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.064562 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:22Z","lastTransitionTime":"2025-11-21T15:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.167462 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.167537 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.167555 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.167580 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.167596 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:22Z","lastTransitionTime":"2025-11-21T15:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.270378 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.270427 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.270441 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.270464 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.270477 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:22Z","lastTransitionTime":"2025-11-21T15:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.373385 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.373429 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.373441 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.373460 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.373478 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:22Z","lastTransitionTime":"2025-11-21T15:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.476376 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.476432 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.476446 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.476466 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.476484 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:22Z","lastTransitionTime":"2025-11-21T15:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.536117 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv"
Nov 21 15:36:22 crc kubenswrapper[4967]: E1121 15:36:22.536334 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.536392 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 21 15:36:22 crc kubenswrapper[4967]: E1121 15:36:22.536523 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.548877 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68a41149-faa4-4822-b4d2-09d2461d2078\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa5cf8d5f0aa74c72abd3fe2c01372b3089066842b87cf74df4f9accde84fcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1812d7c4dec38deedb6295479db0a8c84ddd96a8e41d191b381582c01318c3ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67676d67141387f36bdc8d39929bc2992c37ec12d9b3c1553f8a043e30a39d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:22Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.561541 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:22Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.574591 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:22Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.579206 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.579263 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.579282 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.579305 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.579336 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:22Z","lastTransitionTime":"2025-11-21T15:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.589202 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:22Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.601857 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:22Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.616981 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:22Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.630145 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:22Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.634892 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs\") pod \"network-metrics-daemon-kj7qv\" (UID: \"e413228d-eaa3-45fb-8adf-35e0054bf53c\") " pod="openshift-multus/network-metrics-daemon-kj7qv"
Nov 21 15:36:22 crc kubenswrapper[4967]: E1121 15:36:22.635089 4967 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 21 15:36:22 crc kubenswrapper[4967]: E1121 15:36:22.635205 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs podName:e413228d-eaa3-45fb-8adf-35e0054bf53c nodeName:}" failed. No retries permitted until 2025-11-21 15:36:54.635179452 +0000 UTC m=+102.893700460 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs") pod "network-metrics-daemon-kj7qv" (UID: "e413228d-eaa3-45fb-8adf-35e0054bf53c") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.642965 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:22Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.659166 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:22Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.672278 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:22Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.682077 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.682134 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.682144 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.682163 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.682176 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:22Z","lastTransitionTime":"2025-11-21T15:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.685977 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:22Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.704596 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:07Z\\\",\\\"message\\\":\\\"ces.lbConfig(nil)\\\\nI1121 15:36:07.443521 6681 services_controller.go:445] Built service openshift-kube-controller-manager-operator/metrics LB template configs for network=default: []services.lbConfig(nil)\\\\nI1121 15:36:07.443552 6681 services_controller.go:451] Built service openshift-kube-controller-manager-operator/metrics cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-controller-manager-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-controller-manager-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.219\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1121 15:36:07.443426 6681 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fc\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:36:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zm492_openshift-ovn-kubernetes(eeb9277d-9a26-4665-a01c-9ed1c379e8dd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:22Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.718033 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-
cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:22Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.731402 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:22Z is after 2025-08-24T17:21:41Z" Nov 21 
15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.746178 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:22Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.760682 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:22Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.773267 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:22Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.785329 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.785390 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.785404 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.785425 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.785442 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:22Z","lastTransitionTime":"2025-11-21T15:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.887634 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.887682 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.887695 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.887713 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.887727 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:22Z","lastTransitionTime":"2025-11-21T15:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.990832 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.990890 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.990900 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.990917 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:22 crc kubenswrapper[4967]: I1121 15:36:22.990926 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:22Z","lastTransitionTime":"2025-11-21T15:36:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.093041 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.093090 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.093104 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.093123 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.093134 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:23Z","lastTransitionTime":"2025-11-21T15:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.195471 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.195502 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.195511 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.195527 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.195539 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:23Z","lastTransitionTime":"2025-11-21T15:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.297882 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.297915 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.297924 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.297940 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.297950 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:23Z","lastTransitionTime":"2025-11-21T15:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.401423 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.401469 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.401482 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.401500 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.401514 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:23Z","lastTransitionTime":"2025-11-21T15:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.505196 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.505281 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.505337 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.505363 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.505408 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:23Z","lastTransitionTime":"2025-11-21T15:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.535439 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.535523 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:23 crc kubenswrapper[4967]: E1121 15:36:23.535951 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:23 crc kubenswrapper[4967]: E1121 15:36:23.536114 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.608026 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.608077 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.608089 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.608107 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.608122 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:23Z","lastTransitionTime":"2025-11-21T15:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.710791 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.710830 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.710840 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.710856 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.710866 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:23Z","lastTransitionTime":"2025-11-21T15:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.814167 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.814215 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.814226 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.814242 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.814255 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:23Z","lastTransitionTime":"2025-11-21T15:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.916720 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.916793 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.916806 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.916823 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:23 crc kubenswrapper[4967]: I1121 15:36:23.916834 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:23Z","lastTransitionTime":"2025-11-21T15:36:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.019895 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.019947 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.019958 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.019978 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.019993 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:24Z","lastTransitionTime":"2025-11-21T15:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.122684 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.122736 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.122748 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.122775 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.122788 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:24Z","lastTransitionTime":"2025-11-21T15:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.225205 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.225248 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.225258 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.225275 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.225289 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:24Z","lastTransitionTime":"2025-11-21T15:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.328372 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.328437 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.328451 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.328475 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.328492 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:24Z","lastTransitionTime":"2025-11-21T15:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.431555 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.431633 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.431660 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.431708 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.431724 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:24Z","lastTransitionTime":"2025-11-21T15:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.535282 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.535353 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.535418 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.535430 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.535455 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:24 crc kubenswrapper[4967]: E1121 15:36:24.535421 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.535495 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:24Z","lastTransitionTime":"2025-11-21T15:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.536460 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:24 crc kubenswrapper[4967]: E1121 15:36:24.536784 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.638821 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.638879 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.638893 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.638917 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.638936 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:24Z","lastTransitionTime":"2025-11-21T15:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.742286 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.742351 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.742362 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.742384 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.742399 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:24Z","lastTransitionTime":"2025-11-21T15:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.844910 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.844948 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.844957 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.844975 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.844985 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:24Z","lastTransitionTime":"2025-11-21T15:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.949989 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.950036 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.950046 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.950063 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.950075 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:24Z","lastTransitionTime":"2025-11-21T15:36:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.965135 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j4dcx_629a5f41-3cd8-4518-a833-2832f4ebe55a/kube-multus/0.log" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.965227 4967 generic.go:334] "Generic (PLEG): container finished" podID="629a5f41-3cd8-4518-a833-2832f4ebe55a" containerID="3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751" exitCode=1 Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.965266 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j4dcx" event={"ID":"629a5f41-3cd8-4518-a833-2832f4ebe55a","Type":"ContainerDied","Data":"3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751"} Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.965733 4967 scope.go:117] "RemoveContainer" containerID="3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751" Nov 21 15:36:24 crc kubenswrapper[4967]: I1121 15:36:24.982653 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:24Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.000979 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:24Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.017370 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:25Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.032828 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:24Z\\\",\\\"message\\\":\\\"2025-11-21T15:35:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3dcf63e2-67e0-43a7-92f1-d7292bf82bbe\\\\n2025-11-21T15:35:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3dcf63e2-67e0-43a7-92f1-d7292bf82bbe to /host/opt/cni/bin/\\\\n2025-11-21T15:35:39Z [verbose] multus-daemon 
started\\\\n2025-11-21T15:35:39Z [verbose] Readiness Indicator file check\\\\n2025-11-21T15:36:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:25Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.045984 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:25Z is after 2025-08-24T17:21:41Z" Nov 21 
15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.055089 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.055136 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.055149 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.055170 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.055186 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:25Z","lastTransitionTime":"2025-11-21T15:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.060389 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68a41149-faa4-4822-b4d2-09d2461d2078\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa5cf8d5f0aa74c72abd3fe2c01372b3089066842b87cf74df4f9accde84fcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1812d7c4dec38deedb6295479db0a8c84ddd96a8e41d191b381582c01318c3ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"
restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67676d67141387f36bdc8d39929bc2992c37ec12d9b3c1553f8a043e30a39d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:25Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.071791 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:25Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.088538 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:25Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.103886 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:25Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.120966 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:25Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.133344 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:25Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.147660 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:25Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.157873 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.157939 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.157950 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.157974 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.157985 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:25Z","lastTransitionTime":"2025-11-21T15:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.161136 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:25Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.179629 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reaso
n\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T15:36:25Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.195212 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:25Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.211337 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:25Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.232143 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:07Z\\\",\\\"message\\\":\\\"ces.lbConfig(nil)\\\\nI1121 15:36:07.443521 6681 services_controller.go:445] Built service openshift-kube-controller-manager-operator/metrics LB template configs for network=default: []services.lbConfig(nil)\\\\nI1121 15:36:07.443552 6681 services_controller.go:451] Built service openshift-kube-controller-manager-operator/metrics cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-controller-manager-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-controller-manager-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.219\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1121 15:36:07.443426 6681 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fc\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:36:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zm492_openshift-ovn-kubernetes(eeb9277d-9a26-4665-a01c-9ed1c379e8dd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:25Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.262039 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.262116 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.262134 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.262159 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.262178 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:25Z","lastTransitionTime":"2025-11-21T15:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.364534 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.364593 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.364603 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.364622 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.364632 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:25Z","lastTransitionTime":"2025-11-21T15:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.467707 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.467776 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.467789 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.467814 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.467828 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:25Z","lastTransitionTime":"2025-11-21T15:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.535658 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.535722 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:25 crc kubenswrapper[4967]: E1121 15:36:25.535838 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:25 crc kubenswrapper[4967]: E1121 15:36:25.535934 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.571165 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.571209 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.571218 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.571235 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.571244 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:25Z","lastTransitionTime":"2025-11-21T15:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.674381 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.674463 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.674544 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.674590 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.674618 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:25Z","lastTransitionTime":"2025-11-21T15:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.777223 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.777275 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.777283 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.777300 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.777327 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:25Z","lastTransitionTime":"2025-11-21T15:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.880798 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.880870 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.880885 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.880915 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.880931 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:25Z","lastTransitionTime":"2025-11-21T15:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.971580 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j4dcx_629a5f41-3cd8-4518-a833-2832f4ebe55a/kube-multus/0.log" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.971655 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j4dcx" event={"ID":"629a5f41-3cd8-4518-a833-2832f4ebe55a","Type":"ContainerStarted","Data":"691bc6b6db50f421eb9e82f4a113bcef24c35943fe77db1a1c5635a24de9674e"} Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.983293 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.983358 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.983368 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.983383 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.983393 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:25Z","lastTransitionTime":"2025-11-21T15:36:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:25 crc kubenswrapper[4967]: I1121 15:36:25.991572 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:25Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.006407 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:26Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.021152 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:26Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.035941 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:26Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.046781 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:26Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.059704 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:26Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.078523 4967 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:26Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.085874 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.085936 4967 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.085955 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.085985 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.086003 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:26Z","lastTransitionTime":"2025-11-21T15:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.091667 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:26Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.104507 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:26Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.124293 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:07Z\\\",\\\"message\\\":\\\"ces.lbConfig(nil)\\\\nI1121 15:36:07.443521 6681 services_controller.go:445] Built service openshift-kube-controller-manager-operator/metrics LB template configs for network=default: []services.lbConfig(nil)\\\\nI1121 15:36:07.443552 6681 services_controller.go:451] Built service openshift-kube-controller-manager-operator/metrics cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-controller-manager-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-controller-manager-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.219\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1121 15:36:07.443426 6681 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fc\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:36:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zm492_openshift-ovn-kubernetes(eeb9277d-9a26-4665-a01c-9ed1c379e8dd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:26Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.140936 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:26Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.159225 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:26Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.172678 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:26Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.185559 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://691bc6b6db50f421eb9e82f4a113bcef24c35943fe77db1a1c5635a24de9674e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:24Z\\\",\\\"message\\\":\\\"2025-11-21T15:35:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3dcf63e2-67e0-43a7-92f1-d7292bf82bbe\\\\n2025-11-21T15:35:39+00:00 [cnibincopy] 
Successfully moved files in /host/opt/cni/bin/upgrade_3dcf63e2-67e0-43a7-92f1-d7292bf82bbe to /host/opt/cni/bin/\\\\n2025-11-21T15:35:39Z [verbose] multus-daemon started\\\\n2025-11-21T15:35:39Z [verbose] Readiness Indicator file check\\\\n2025-11-21T15:36:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:36:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:26Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.189215 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.189252 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.189289 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:26 crc kubenswrapper[4967]: 
I1121 15:36:26.189328 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.189342 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:26Z","lastTransitionTime":"2025-11-21T15:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.200890 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"
mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:26Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.216618 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68a41149-faa4-4822-b4d2-09d2461d2078\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa5cf8d5f0aa74c72abd3fe2c01372b3089066842b87cf74df4f9accde84fcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1812d7c4dec38deedb6295479db0a8c84ddd96a8e41d191b381582c01318c3ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\
\\"containerID\\\":\\\"cri-o://67676d67141387f36bdc8d39929bc2992c37ec12d9b3c1553f8a043e30a39d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:26Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.231270 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:26Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.291171 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.291209 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.291218 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.291235 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.291245 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:26Z","lastTransitionTime":"2025-11-21T15:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.394778 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.394828 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.394837 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.394856 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.394867 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:26Z","lastTransitionTime":"2025-11-21T15:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.498218 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.498300 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.498335 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.498379 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.498392 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:26Z","lastTransitionTime":"2025-11-21T15:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.536086 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.536289 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:26 crc kubenswrapper[4967]: E1121 15:36:26.536505 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:26 crc kubenswrapper[4967]: E1121 15:36:26.536297 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.601753 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.601820 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.601834 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.601860 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.601876 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:26Z","lastTransitionTime":"2025-11-21T15:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.704802 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.704875 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.704894 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.704924 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.704950 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:26Z","lastTransitionTime":"2025-11-21T15:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.808428 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.808497 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.808512 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.808534 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.808552 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:26Z","lastTransitionTime":"2025-11-21T15:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.911102 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.911158 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.911179 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.911203 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:26 crc kubenswrapper[4967]: I1121 15:36:26.911217 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:26Z","lastTransitionTime":"2025-11-21T15:36:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.014211 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.014281 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.014299 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.014392 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.014411 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:27Z","lastTransitionTime":"2025-11-21T15:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.117455 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.117511 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.117528 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.117552 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.117565 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:27Z","lastTransitionTime":"2025-11-21T15:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.220204 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.220260 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.220269 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.220286 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.220299 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:27Z","lastTransitionTime":"2025-11-21T15:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.324529 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.324635 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.324660 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.324720 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.324742 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:27Z","lastTransitionTime":"2025-11-21T15:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.428512 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.428584 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.428610 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.428649 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.428674 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:27Z","lastTransitionTime":"2025-11-21T15:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.531476 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.531540 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.531550 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.531570 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.531580 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:27Z","lastTransitionTime":"2025-11-21T15:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.535860 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.535896 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:27 crc kubenswrapper[4967]: E1121 15:36:27.536021 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:27 crc kubenswrapper[4967]: E1121 15:36:27.536117 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.635083 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.635136 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.635150 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.635175 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.635190 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:27Z","lastTransitionTime":"2025-11-21T15:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.738916 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.738966 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.738977 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.738997 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.739012 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:27Z","lastTransitionTime":"2025-11-21T15:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.842368 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.842413 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.842423 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.842440 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.842452 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:27Z","lastTransitionTime":"2025-11-21T15:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.945879 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.945931 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.945940 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.945958 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:27 crc kubenswrapper[4967]: I1121 15:36:27.945968 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:27Z","lastTransitionTime":"2025-11-21T15:36:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.049207 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.049280 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.049294 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.049370 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.049392 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:28Z","lastTransitionTime":"2025-11-21T15:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.152709 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.153150 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.153294 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.153439 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.153523 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:28Z","lastTransitionTime":"2025-11-21T15:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.257818 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.257928 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.257940 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.257959 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.257974 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:28Z","lastTransitionTime":"2025-11-21T15:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.360671 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.360712 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.360722 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.360739 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.360748 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:28Z","lastTransitionTime":"2025-11-21T15:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.463954 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.464004 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.464013 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.464045 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.464058 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:28Z","lastTransitionTime":"2025-11-21T15:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.536142 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.536169 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:28 crc kubenswrapper[4967]: E1121 15:36:28.536379 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:28 crc kubenswrapper[4967]: E1121 15:36:28.536497 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.567998 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.568048 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.568062 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.568083 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.568105 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:28Z","lastTransitionTime":"2025-11-21T15:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.674687 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.674957 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.675056 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.675094 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.675171 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:28Z","lastTransitionTime":"2025-11-21T15:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.779003 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.779066 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.779078 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.779100 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.779113 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:28Z","lastTransitionTime":"2025-11-21T15:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.881382 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.881430 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.881438 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.881454 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.881462 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:28Z","lastTransitionTime":"2025-11-21T15:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.984706 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.984776 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.984795 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.984821 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:28 crc kubenswrapper[4967]: I1121 15:36:28.984840 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:28Z","lastTransitionTime":"2025-11-21T15:36:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.089011 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.089110 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.089137 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.089173 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.089199 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:29Z","lastTransitionTime":"2025-11-21T15:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.137401 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.137474 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.137488 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.137512 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.137525 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:29Z","lastTransitionTime":"2025-11-21T15:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:29 crc kubenswrapper[4967]: E1121 15:36:29.155550 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:29Z is after 
2025-08-24T17:21:41Z" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.163098 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.163201 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.163225 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.163252 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.163269 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:29Z","lastTransitionTime":"2025-11-21T15:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:29 crc kubenswrapper[4967]: E1121 15:36:29.187859 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:29Z is after 
2025-08-24T17:21:41Z" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.194575 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.194687 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.194715 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.194755 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.194780 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:29Z","lastTransitionTime":"2025-11-21T15:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:29 crc kubenswrapper[4967]: E1121 15:36:29.219140 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:29Z is after 
2025-08-24T17:21:41Z" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.226494 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.226588 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.226612 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.226642 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.226660 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:29Z","lastTransitionTime":"2025-11-21T15:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:29 crc kubenswrapper[4967]: E1121 15:36:29.245266 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:29Z is after 
2025-08-24T17:21:41Z" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.250194 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.250237 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.250251 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.250275 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.250293 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:29Z","lastTransitionTime":"2025-11-21T15:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:29 crc kubenswrapper[4967]: E1121 15:36:29.263566 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:29Z is after 
2025-08-24T17:21:41Z" Nov 21 15:36:29 crc kubenswrapper[4967]: E1121 15:36:29.263721 4967 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.266038 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.266100 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.266115 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.266136 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.266147 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:29Z","lastTransitionTime":"2025-11-21T15:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.369207 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.369283 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.369296 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.369344 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.369362 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:29Z","lastTransitionTime":"2025-11-21T15:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.473125 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.473182 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.473194 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.473215 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.473233 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:29Z","lastTransitionTime":"2025-11-21T15:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.535576 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.535627 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:29 crc kubenswrapper[4967]: E1121 15:36:29.535788 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:29 crc kubenswrapper[4967]: E1121 15:36:29.535956 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.576515 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.576578 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.576598 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.576627 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.576649 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:29Z","lastTransitionTime":"2025-11-21T15:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.679873 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.679940 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.679964 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.679999 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.680022 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:29Z","lastTransitionTime":"2025-11-21T15:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.782382 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.782427 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.782440 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.782460 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.782473 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:29Z","lastTransitionTime":"2025-11-21T15:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.886201 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.886264 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.886281 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.886307 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.886357 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:29Z","lastTransitionTime":"2025-11-21T15:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.988822 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.988874 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.988896 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.988931 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:29 crc kubenswrapper[4967]: I1121 15:36:29.988955 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:29Z","lastTransitionTime":"2025-11-21T15:36:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.092647 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.092731 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.092756 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.092790 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.092817 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:30Z","lastTransitionTime":"2025-11-21T15:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.195794 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.195856 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.195872 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.195897 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.195911 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:30Z","lastTransitionTime":"2025-11-21T15:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.299556 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.299636 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.299653 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.299684 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.299705 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:30Z","lastTransitionTime":"2025-11-21T15:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.403773 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.403813 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.403822 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.403837 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.403848 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:30Z","lastTransitionTime":"2025-11-21T15:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.506698 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.506759 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.506776 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.506800 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.506813 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:30Z","lastTransitionTime":"2025-11-21T15:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.535746 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.535772 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:30 crc kubenswrapper[4967]: E1121 15:36:30.535975 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:30 crc kubenswrapper[4967]: E1121 15:36:30.536108 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.610339 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.610404 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.610420 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.610443 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.610459 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:30Z","lastTransitionTime":"2025-11-21T15:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.714404 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.714456 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.714656 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.714678 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.714693 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:30Z","lastTransitionTime":"2025-11-21T15:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.817555 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.817606 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.817624 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.817645 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.817662 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:30Z","lastTransitionTime":"2025-11-21T15:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.920786 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.920851 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.920865 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.920894 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:30 crc kubenswrapper[4967]: I1121 15:36:30.920909 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:30Z","lastTransitionTime":"2025-11-21T15:36:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.024518 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.024572 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.024585 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.024607 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.024626 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:31Z","lastTransitionTime":"2025-11-21T15:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.127487 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.127567 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.127583 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.127605 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.127616 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:31Z","lastTransitionTime":"2025-11-21T15:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.230057 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.230109 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.230126 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.230152 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.230170 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:31Z","lastTransitionTime":"2025-11-21T15:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.332529 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.332578 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.332588 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.332608 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.332619 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:31Z","lastTransitionTime":"2025-11-21T15:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.436070 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.436128 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.436143 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.436164 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.436177 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:31Z","lastTransitionTime":"2025-11-21T15:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.535410 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:31 crc kubenswrapper[4967]: E1121 15:36:31.535564 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.535625 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:31 crc kubenswrapper[4967]: E1121 15:36:31.535967 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.536449 4967 scope.go:117] "RemoveContainer" containerID="463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.539256 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.539287 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.539298 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.539333 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.539348 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:31Z","lastTransitionTime":"2025-11-21T15:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.644969 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.645038 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.645055 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.645082 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.645098 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:31Z","lastTransitionTime":"2025-11-21T15:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.748520 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.748582 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.748601 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.748627 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.748641 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:31Z","lastTransitionTime":"2025-11-21T15:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.851746 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.851805 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.851816 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.851842 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.851861 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:31Z","lastTransitionTime":"2025-11-21T15:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.954838 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.954884 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.954893 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.954909 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:31 crc kubenswrapper[4967]: I1121 15:36:31.954919 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:31Z","lastTransitionTime":"2025-11-21T15:36:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.006842 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovnkube-controller/2.log" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.012305 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerStarted","Data":"a8d45d2b6e1bb1f80ce967579185bd793f0c1dae1083720a39ae3ad3863ca14d"} Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.013537 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.033170 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.048922 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.057654 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.057698 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.057716 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.057735 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.057746 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:32Z","lastTransitionTime":"2025-11-21T15:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.065066 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.086695 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://691bc6b6db50f421eb9e82f4a113bcef24c35943fe77db1a1c5635a24de9674e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:24Z\\\",\\\"message\\\":\\\"2025-11-21T15:35:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3dcf63e2-67e0-43a7-92f1-d7292bf82bbe\\\\n2025-11-21T15:35:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3dcf63e2-67e0-43a7-92f1-d7292bf82bbe to /host/opt/cni/bin/\\\\n2025-11-21T15:35:39Z [verbose] multus-daemon started\\\\n2025-11-21T15:35:39Z [verbose] Readiness Indicator file check\\\\n2025-11-21T15:36:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:36:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z"
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://691bc6b6db50f421eb9e82f4a113bcef24c35943fe77db1a1c5635a24de9674e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:24Z\\\",\\\"message\\\":\\\"2025-11-21T15:35:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3dcf63e2-67e0-43a7-92f1-d7292bf82bbe\\\\n2025-11-21T15:35:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3dcf63e2-67e0-43a7-92f1-d7292bf82bbe to /host/opt/cni/bin/\\\\n2025-11-21T15:35:39Z [verbose] multus-daemon started\\\\n2025-11-21T15:35:39Z [verbose] Readiness Indicator file check\\\\n2025-11-21T15:36:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:36:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.101030 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 
15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.122543 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68a41149-faa4-4822-b4d2-09d2461d2078\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa5cf8d5f0aa74c72abd3fe2c01372b3089066842b87cf74df4f9accde84fcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1812d7c4dec38deedb6295479db0a8c84ddd96a8e41d191b381582c01318c3ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67676d67141387f36bdc8d39929bc2992c37ec12d9b3c1553f8a043e30a39d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.139172 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"h
ostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.153948 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.161488 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.161527 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.161539 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.161558 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.161572 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:32Z","lastTransitionTime":"2025-11-21T15:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.173441 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cr
i-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 
15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 
15:36:32.186967 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.200680 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.213608 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.224756 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.235026 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.248875 4967 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.264001 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.264049 4967 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.264086 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.264108 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.264121 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:32Z","lastTransitionTime":"2025-11-21T15:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.264134 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/
ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.284564 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8d45d2b6e1bb1f80ce967579185bd793f0c1dae
1083720a39ae3ad3863ca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:07Z\\\",\\\"message\\\":\\\"ces.lbConfig(nil)\\\\nI1121 15:36:07.443521 6681 services_controller.go:445] Built service openshift-kube-controller-manager-operator/metrics LB template configs for network=default: []services.lbConfig(nil)\\\\nI1121 15:36:07.443552 6681 services_controller.go:451] Built service openshift-kube-controller-manager-operator/metrics cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-controller-manager-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-controller-manager-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.219\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1121 15:36:07.443426 6681 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 
0x1fc\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:36:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:36:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"
containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.367403 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.367462 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.367474 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.367492 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.367503 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:32Z","lastTransitionTime":"2025-11-21T15:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.470724 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.470793 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.470808 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.470843 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.470859 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:32Z","lastTransitionTime":"2025-11-21T15:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.536171 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.536242 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:32 crc kubenswrapper[4967]: E1121 15:36:32.536400 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:32 crc kubenswrapper[4967]: E1121 15:36:32.536787 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.552369 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.566632 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.573515 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.573564 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.573574 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.573590 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.573600 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:32Z","lastTransitionTime":"2025-11-21T15:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.580892 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.602902 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reaso
n\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.620053 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.637177 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.654044 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.669118 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.676581 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.676632 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.676648 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.676703 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.676722 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:32Z","lastTransitionTime":"2025-11-21T15:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.683410 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.706175 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8d45d2b6e1bb1f80ce967579185bd793f0c1dae1083720a39ae3ad3863ca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:07Z\\\",\\\"message\\\":\\\"ces.lbConfig(nil)\\\\nI1121 15:36:07.443521 6681 services_controller.go:445] Built service openshift-kube-controller-manager-operator/metrics LB template configs for network=default: []services.lbConfig(nil)\\\\nI1121 15:36:07.443552 6681 services_controller.go:451] Built service openshift-kube-controller-manager-operator/metrics cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-controller-manager-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-controller-manager-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.219\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1121 15:36:07.443426 6681 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 
0x1fc\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:36:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:36:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"
containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.722063 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.739941 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.757185 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://691bc6b6db50f421eb9e82f4a113bcef24c35943fe77db1a1c5635a24de9674e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:24Z\\\",\\\"message\\\":\\\"2025-11-21T15:35:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3dcf63e2-67e0-43a7-92f1-d7292bf82bbe\\\\n2025-11-21T15:35:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3dcf63e2-67e0-43a7-92f1-d7292bf82bbe to /host/opt/cni/bin/\\\\n2025-11-21T15:35:39Z [verbose] multus-daemon started\\\\n2025-11-21T15:35:39Z [verbose] Readiness Indicator file check\\\\n2025-11-21T15:36:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:36:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.772343 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.779733 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.779778 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.779790 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.779808 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.779817 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:32Z","lastTransitionTime":"2025-11-21T15:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.788750 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.801855 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68a41149-faa4-4822-b4d2-09d2461d2078\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa5cf8d5f0aa74c72abd3fe2c01372b3089066842b87cf74df4f9accde84fcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1812d7c4dec38deedb6295479db0a8c84ddd96a8e41d191b381582c01318c3ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67676d67141387f36bdc8d39929bc2992c37ec12d9b3c1553f8a043e30a39d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.813852 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:32Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.883596 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.883648 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.883666 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.883691 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.883710 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:32Z","lastTransitionTime":"2025-11-21T15:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.987108 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.987157 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.987170 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.987189 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:32 crc kubenswrapper[4967]: I1121 15:36:32.987202 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:32Z","lastTransitionTime":"2025-11-21T15:36:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.018256 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovnkube-controller/3.log"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.019405 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovnkube-controller/2.log"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.023606 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerDied","Data":"a8d45d2b6e1bb1f80ce967579185bd793f0c1dae1083720a39ae3ad3863ca14d"}
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.023717 4967 scope.go:117] "RemoveContainer" containerID="463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.023720 4967 generic.go:334] "Generic (PLEG): container finished" podID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerID="a8d45d2b6e1bb1f80ce967579185bd793f0c1dae1083720a39ae3ad3863ca14d" exitCode=1
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.024412 4967 scope.go:117] "RemoveContainer" containerID="a8d45d2b6e1bb1f80ce967579185bd793f0c1dae1083720a39ae3ad3863ca14d"
Nov 21 15:36:33 crc kubenswrapper[4967]: E1121 15:36:33.024648 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-zm492_openshift-ovn-kubernetes(eeb9277d-9a26-4665-a01c-9ed1c379e8dd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.043092 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:33Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.084085 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68a41149-faa4-4822-b4d2-09d2461d2078\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa5cf8d5f0aa74c72abd3fe2c01372b3089066842b87cf74df4f9accde84fcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1812d7c4dec38deedb6295479db0a8c84ddd96a8e41d191b381582c01318c3ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67676d67141387f36bdc8d39929bc2992c37ec12d9b3c1553f8a043e30a39d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:33Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.095640 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.095695 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.095706 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.095730 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.095743 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:33Z","lastTransitionTime":"2025-11-21T15:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.105386 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:33Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.122652 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:33Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.140892 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:33Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.156162 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:33Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.168947 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:33Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.186966 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:33Z is after 2025-08-24T17:21:41Z"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.198868 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.198914 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.198927 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.198952 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.198967 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:33Z","lastTransitionTime":"2025-11-21T15:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.199270 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:33Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.214083 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:33Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.233254 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8d45d2b6e1bb1f80ce967579185bd793f0c1dae1083720a39ae3ad3863ca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://463a5373910df87d43b3bfb611e434283cccb8049417bf628b6bf29007178abc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:07Z\\\",\\\"message\\\":\\\"ces.lbConfig(nil)\\\\nI1121 15:36:07.443521 6681 services_controller.go:445] Built service openshift-kube-controller-manager-operator/metrics LB template configs for network=default: []services.lbConfig(nil)\\\\nI1121 15:36:07.443552 6681 services_controller.go:451] Built service openshift-kube-controller-manager-operator/metrics cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-kube-controller-manager-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-kube-controller-manager-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.219\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nF1121 15:36:07.443426 6681 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fc\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:36:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8d45d2b6e1bb1f80ce967579185bd793f0c1dae1083720a39ae3ad3863ca14d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:32Z\\\",\\\"message\\\":\\\"1.Pod (0s) from 
k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.399098 7032 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.399205 7032 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1121 15:36:32.399252 7032 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.399220 7032 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.399367 7032 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.400030 7032 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1121 15:36:32.400085 7032 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1121 15:36:32.400092 7032 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1121 15:36:32.400116 7032 factory.go:656] Stopping watch factory\\\\nI1121 15:36:32.400132 7032 ovnkube.go:599] Stopped ovnkube\\\\nI1121 15:36:32.400142 7032 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1121 15:36:32.400168 7032 metrics.go:553] Stopping metrics server at address\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:36:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\
\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:33Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.249005 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:33Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.260879 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:33Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.276261 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:33Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.291185 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:33Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.301244 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.301379 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.301396 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.301416 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.301428 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:33Z","lastTransitionTime":"2025-11-21T15:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.305527 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://691bc6b6db50f421eb9e82f4a113bcef24c35943fe77db1a1c5635a24de9674e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:24Z\\\",\\\"message\\\":\\\"2025-11-21T15:35:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3dcf63e2-67e0-43a7-92f1-d7292bf82bbe\\\\n2025-11-21T15:35:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3dcf63e2-67e0-43a7-92f1-d7292bf82bbe to /host/opt/cni/bin/\\\\n2025-11-21T15:35:39Z [verbose] multus-daemon started\\\\n2025-11-21T15:35:39Z [verbose] Readiness Indicator file check\\\\n2025-11-21T15:36:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:36:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:33Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.318158 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:33Z is after 2025-08-24T17:21:41Z" Nov 21 
15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.404038 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.404124 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.404135 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.404153 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.404164 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:33Z","lastTransitionTime":"2025-11-21T15:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.506120 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.506154 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.506163 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.506179 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.506190 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:33Z","lastTransitionTime":"2025-11-21T15:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.535839 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:33 crc kubenswrapper[4967]: E1121 15:36:33.535970 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.536398 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:33 crc kubenswrapper[4967]: E1121 15:36:33.536560 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.609303 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.609367 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.609377 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.609395 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.609405 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:33Z","lastTransitionTime":"2025-11-21T15:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.711551 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.711615 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.711626 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.711641 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.711652 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:33Z","lastTransitionTime":"2025-11-21T15:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.813900 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.813948 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.813957 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.813976 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.813989 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:33Z","lastTransitionTime":"2025-11-21T15:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.916551 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.916612 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.916621 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.916637 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:33 crc kubenswrapper[4967]: I1121 15:36:33.916665 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:33Z","lastTransitionTime":"2025-11-21T15:36:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.020176 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.020235 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.020253 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.020277 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.020297 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:34Z","lastTransitionTime":"2025-11-21T15:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.028913 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovnkube-controller/3.log" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.031806 4967 scope.go:117] "RemoveContainer" containerID="a8d45d2b6e1bb1f80ce967579185bd793f0c1dae1083720a39ae3ad3863ca14d" Nov 21 15:36:34 crc kubenswrapper[4967]: E1121 15:36:34.031980 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-zm492_openshift-ovn-kubernetes(eeb9277d-9a26-4665-a01c-9ed1c379e8dd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.053569 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":
\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.074580 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.094577 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.114929 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://691bc6b6db50f421eb9e82f4a113bcef24c35943fe77db1a1c5635a24de9674e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:24Z\\\",\\\"message\\\":\\\"2025-11-21T15:35:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3dcf63e2-67e0-43a7-92f1-d7292bf82bbe\\\\n2025-11-21T15:35:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3dcf63e2-67e0-43a7-92f1-d7292bf82bbe to /host/opt/cni/bin/\\\\n2025-11-21T15:35:39Z [verbose] multus-daemon started\\\\n2025-11-21T15:35:39Z [verbose] Readiness Indicator file check\\\\n2025-11-21T15:36:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:36:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.123471 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.123518 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.123533 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.123552 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.123566 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:34Z","lastTransitionTime":"2025-11-21T15:36:34Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.128486 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-
11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.140532 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68a41149-faa4-4822-b4d2-09d2461d2078\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa5cf8d5f0aa74c72abd3fe2c01372b3089066842b87cf74df4f9accde84fcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1812d7c4dec38deedb6295479db0a8c84ddd96a8e41d191b381582c01318c3ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67676d67141387f36bdc8d39929bc2992c37ec12d9b3c1553f8a043e30a39d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\
\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.152674 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.166789 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.180441 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.194926 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.206896 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.216618 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.225856 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.225912 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.225923 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.225946 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.225964 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:34Z","lastTransitionTime":"2025-11-21T15:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.230422 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.243825 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reaso
n\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T15:36:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.254885 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.268967 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.286937 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8d45d2b6e1bb1f80ce967579185bd793f0c1dae1083720a39ae3ad3863ca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8d45d2b6e1bb1f80ce967579185bd793f0c1dae1083720a39ae3ad3863ca14d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:32Z\\\",\\\"message\\\":\\\"1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.399098 7032 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.399205 7032 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1121 15:36:32.399252 7032 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.399220 7032 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.399367 7032 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.400030 7032 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1121 15:36:32.400085 7032 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1121 15:36:32.400092 7032 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1121 15:36:32.400116 7032 factory.go:656] Stopping watch factory\\\\nI1121 15:36:32.400132 7032 ovnkube.go:599] Stopped ovnkube\\\\nI1121 15:36:32.400142 7032 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1121 15:36:32.400168 7032 metrics.go:553] Stopping metrics server at address\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:36:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zm492_openshift-ovn-kubernetes(eeb9277d-9a26-4665-a01c-9ed1c379e8dd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:34Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.329603 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.329658 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.329673 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.329696 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.329713 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:34Z","lastTransitionTime":"2025-11-21T15:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.432799 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.432862 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.432878 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.432902 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.432916 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:34Z","lastTransitionTime":"2025-11-21T15:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.536081 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.536163 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:34 crc kubenswrapper[4967]: E1121 15:36:34.536269 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:34 crc kubenswrapper[4967]: E1121 15:36:34.536423 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.537167 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.537202 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.537220 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.537242 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.537256 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:34Z","lastTransitionTime":"2025-11-21T15:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.639833 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.639898 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.639916 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.639945 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.639965 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:34Z","lastTransitionTime":"2025-11-21T15:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.742262 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.742375 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.742409 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.742441 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.742464 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:34Z","lastTransitionTime":"2025-11-21T15:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.844920 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.844962 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.844975 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.844996 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.845009 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:34Z","lastTransitionTime":"2025-11-21T15:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.947769 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.947823 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.947836 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.947890 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:34 crc kubenswrapper[4967]: I1121 15:36:34.947904 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:34Z","lastTransitionTime":"2025-11-21T15:36:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.050684 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.050785 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.050819 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.050837 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.050852 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:35Z","lastTransitionTime":"2025-11-21T15:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.154290 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.154375 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.154390 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.154409 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.154419 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:35Z","lastTransitionTime":"2025-11-21T15:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.257358 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.257404 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.257416 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.257436 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.257449 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:35Z","lastTransitionTime":"2025-11-21T15:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.359283 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.359348 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.359361 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.359378 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.359389 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:35Z","lastTransitionTime":"2025-11-21T15:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.461714 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.461755 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.461768 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.461785 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.461797 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:35Z","lastTransitionTime":"2025-11-21T15:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.494511 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:36:35 crc kubenswrapper[4967]: E1121 15:36:35.494972 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:39.494943391 +0000 UTC m=+147.753464399 (durationBeforeRetry 1m4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.495026 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.495071 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:35 crc kubenswrapper[4967]: E1121 15:36:35.495193 4967 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 15:36:35 crc kubenswrapper[4967]: E1121 15:36:35.495247 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 15:37:39.495234439 +0000 UTC m=+147.753755447 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 15:36:35 crc kubenswrapper[4967]: E1121 15:36:35.495639 4967 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 15:36:35 crc kubenswrapper[4967]: E1121 15:36:35.496622 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 15:37:39.496546566 +0000 UTC m=+147.755067584 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.536086 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.536212 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:35 crc kubenswrapper[4967]: E1121 15:36:35.536370 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:35 crc kubenswrapper[4967]: E1121 15:36:35.536522 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.563919 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.563952 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.563963 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.563979 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.563989 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:35Z","lastTransitionTime":"2025-11-21T15:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.563989 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:35Z","lastTransitionTime":"2025-11-21T15:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.595756 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.595861 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 21 15:36:35 crc kubenswrapper[4967]: E1121 15:36:35.595948 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 21 15:36:35 crc kubenswrapper[4967]: E1121 15:36:35.595978 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 21 15:36:35 crc kubenswrapper[4967]: E1121 15:36:35.595991 4967 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 21 15:36:35 crc kubenswrapper[4967]: E1121 15:36:35.596013 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 21 15:36:35 crc kubenswrapper[4967]: E1121 15:36:35.596040 4967 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 21 15:36:35 crc kubenswrapper[4967]: E1121 15:36:35.596056 4967 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 21 15:36:35 crc kubenswrapper[4967]: E1121 15:36:35.596042 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 15:37:39.596024089 +0000 UTC m=+147.854545097 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 21 15:36:35 crc kubenswrapper[4967]: E1121 15:36:35.596130 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 15:37:39.596113601 +0000 UTC m=+147.854634689 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.666785 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.666831 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.666840 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.666856 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.666865 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:35Z","lastTransitionTime":"2025-11-21T15:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.769178 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.769214 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.769234 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.769253 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.769264 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:35Z","lastTransitionTime":"2025-11-21T15:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.872287 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.872352 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.872362 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.872379 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.872390 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:35Z","lastTransitionTime":"2025-11-21T15:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.974903 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.974947 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.974955 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.974969 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:35 crc kubenswrapper[4967]: I1121 15:36:35.974980 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:35Z","lastTransitionTime":"2025-11-21T15:36:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.077923 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.077984 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.077999 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.078022 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.078037 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:36Z","lastTransitionTime":"2025-11-21T15:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.181771 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.181883 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.181899 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.181923 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.181938 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:36Z","lastTransitionTime":"2025-11-21T15:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.285192 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.285259 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.285273 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.285293 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.285303 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:36Z","lastTransitionTime":"2025-11-21T15:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.387861 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.387905 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.387917 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.387935 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.387947 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:36Z","lastTransitionTime":"2025-11-21T15:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.490806 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.490862 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.490872 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.490887 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.490899 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:36Z","lastTransitionTime":"2025-11-21T15:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.535582 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.535729 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv"
Nov 21 15:36:36 crc kubenswrapper[4967]: E1121 15:36:36.535764 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.593468 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.593550 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.593570 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.593603 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.593625 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:36Z","lastTransitionTime":"2025-11-21T15:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.696995 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.697055 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.697066 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.697084 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.697095 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:36Z","lastTransitionTime":"2025-11-21T15:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.697095 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:36Z","lastTransitionTime":"2025-11-21T15:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.803440 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.803516 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.803546 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.803585 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.803610 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:36Z","lastTransitionTime":"2025-11-21T15:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.907042 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.907099 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.907115 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.907139 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:36 crc kubenswrapper[4967]: I1121 15:36:36.907157 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:36Z","lastTransitionTime":"2025-11-21T15:36:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.010182 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.010242 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.010255 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.010273 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.010286 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:37Z","lastTransitionTime":"2025-11-21T15:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.113055 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.113106 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.113114 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.113132 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.113142 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:37Z","lastTransitionTime":"2025-11-21T15:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.215880 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.215921 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.215930 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.215944 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.215954 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:37Z","lastTransitionTime":"2025-11-21T15:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.318102 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.318161 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.318171 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.318190 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.318200 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:37Z","lastTransitionTime":"2025-11-21T15:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.420644 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.420703 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.420714 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.420733 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.420742 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:37Z","lastTransitionTime":"2025-11-21T15:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.524206 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.524268 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.524278 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.524299 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.524327 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:37Z","lastTransitionTime":"2025-11-21T15:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.535574 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 21 15:36:37 crc kubenswrapper[4967]: E1121 15:36:37.535740 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.535938 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 21 15:36:37 crc kubenswrapper[4967]: E1121 15:36:37.536009 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.627814 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.627879 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.627891 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.627912 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.627924 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:37Z","lastTransitionTime":"2025-11-21T15:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.731054 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.731106 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.731117 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.731141 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.731152 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:37Z","lastTransitionTime":"2025-11-21T15:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.834623 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.834692 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.834703 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.834723 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.834734 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:37Z","lastTransitionTime":"2025-11-21T15:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.938440 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.938523 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.938545 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.938577 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:37 crc kubenswrapper[4967]: I1121 15:36:37.938595 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:37Z","lastTransitionTime":"2025-11-21T15:36:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.041338 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.041389 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.041400 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.041421 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.041434 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:38Z","lastTransitionTime":"2025-11-21T15:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.145660 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.145745 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.145770 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.145801 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.145819 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:38Z","lastTransitionTime":"2025-11-21T15:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.248859 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.248921 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.248936 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.248961 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.248976 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:38Z","lastTransitionTime":"2025-11-21T15:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.352612 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.352673 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.352685 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.352705 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.352718 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:38Z","lastTransitionTime":"2025-11-21T15:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.455521 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.455555 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.455565 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.455581 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.455592 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:38Z","lastTransitionTime":"2025-11-21T15:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.536644 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.536806 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv"
Nov 21 15:36:38 crc kubenswrapper[4967]: E1121 15:36:38.536880 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.557947 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.557988 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.558002 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.558024 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.558038 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:38Z","lastTransitionTime":"2025-11-21T15:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.662218 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.662275 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.662295 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.662369 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.662395 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:38Z","lastTransitionTime":"2025-11-21T15:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.662395 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:38Z","lastTransitionTime":"2025-11-21T15:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.765526 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.765571 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.765583 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.765600 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.765612 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:38Z","lastTransitionTime":"2025-11-21T15:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.868937 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.868987 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.869003 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.869022 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.869036 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:38Z","lastTransitionTime":"2025-11-21T15:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.972915 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.972993 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.973013 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.973039 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:38 crc kubenswrapper[4967]: I1121 15:36:38.973059 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:38Z","lastTransitionTime":"2025-11-21T15:36:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.076626 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.076705 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.076747 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.076789 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.076816 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:39Z","lastTransitionTime":"2025-11-21T15:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.181080 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.181172 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.181193 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.181227 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.181263 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:39Z","lastTransitionTime":"2025-11-21T15:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.284704 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.284762 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.284777 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.284803 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.284820 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:39Z","lastTransitionTime":"2025-11-21T15:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.388723 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.388769 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.388779 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.388797 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.388811 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:39Z","lastTransitionTime":"2025-11-21T15:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.467262 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.467397 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.467426 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.467461 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.467485 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:39Z","lastTransitionTime":"2025-11-21T15:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:39 crc kubenswrapper[4967]: E1121 15:36:39.492146 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.498095 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.498140 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.498157 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.498180 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.498200 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:39Z","lastTransitionTime":"2025-11-21T15:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:39 crc kubenswrapper[4967]: E1121 15:36:39.517869 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.523020 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.523055 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.523066 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.523082 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.523095 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:39Z","lastTransitionTime":"2025-11-21T15:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.535367 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.535400 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:39 crc kubenswrapper[4967]: E1121 15:36:39.535495 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:39 crc kubenswrapper[4967]: E1121 15:36:39.535622 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:39 crc kubenswrapper[4967]: E1121 15:36:39.543703 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.549228 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.549285 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.549298 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.549375 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.549395 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:39Z","lastTransitionTime":"2025-11-21T15:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:39 crc kubenswrapper[4967]: E1121 15:36:39.569353 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.574853 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.574910 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.574923 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.574944 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.574960 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:39Z","lastTransitionTime":"2025-11-21T15:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:39 crc kubenswrapper[4967]: E1121 15:36:39.597505 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:39Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:39Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:39 crc kubenswrapper[4967]: E1121 15:36:39.597759 4967 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.599852 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.600036 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.600062 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.600092 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.600113 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:39Z","lastTransitionTime":"2025-11-21T15:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.704352 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.704395 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.704408 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.704428 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.704444 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:39Z","lastTransitionTime":"2025-11-21T15:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.807484 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.807550 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.807559 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.807576 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.807589 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:39Z","lastTransitionTime":"2025-11-21T15:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.911441 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.911517 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.911531 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.911557 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:39 crc kubenswrapper[4967]: I1121 15:36:39.911571 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:39Z","lastTransitionTime":"2025-11-21T15:36:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.014649 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.014730 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.014755 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.014791 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.014813 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:40Z","lastTransitionTime":"2025-11-21T15:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.119504 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.119577 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.119598 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.119626 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.119651 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:40Z","lastTransitionTime":"2025-11-21T15:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.223270 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.223440 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.223467 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.223509 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.223537 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:40Z","lastTransitionTime":"2025-11-21T15:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.329682 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.329784 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.329810 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.329842 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.329866 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:40Z","lastTransitionTime":"2025-11-21T15:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.433602 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.433673 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.433687 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.433710 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.433728 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:40Z","lastTransitionTime":"2025-11-21T15:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.535650 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:40 crc kubenswrapper[4967]: E1121 15:36:40.535862 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.536389 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:40 crc kubenswrapper[4967]: E1121 15:36:40.536497 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.537948 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.538042 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.538069 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.538110 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.538137 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:40Z","lastTransitionTime":"2025-11-21T15:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.642370 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.642413 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.642425 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.642448 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.642463 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:40Z","lastTransitionTime":"2025-11-21T15:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.745492 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.745557 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.745574 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.745599 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.745615 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:40Z","lastTransitionTime":"2025-11-21T15:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.849210 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.849273 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.849291 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.849346 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.849366 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:40Z","lastTransitionTime":"2025-11-21T15:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.951932 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.952007 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.952026 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.952054 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:40 crc kubenswrapper[4967]: I1121 15:36:40.952074 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:40Z","lastTransitionTime":"2025-11-21T15:36:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.056522 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.056627 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.056647 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.056675 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.056699 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:41Z","lastTransitionTime":"2025-11-21T15:36:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.160265 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.160378 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.160407 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.160440 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.160465 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:41Z","lastTransitionTime":"2025-11-21T15:36:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.263475 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.263541 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.263562 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.263588 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.263611 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:41Z","lastTransitionTime":"2025-11-21T15:36:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.367894 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.367944 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.367957 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.367978 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.367991 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:41Z","lastTransitionTime":"2025-11-21T15:36:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.471661 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.471740 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.471757 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.471785 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.471804 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:41Z","lastTransitionTime":"2025-11-21T15:36:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.535667 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.535820 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:41 crc kubenswrapper[4967]: E1121 15:36:41.536056 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:41 crc kubenswrapper[4967]: E1121 15:36:41.536149 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.575293 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.575358 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.575369 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.575390 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.575403 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:41Z","lastTransitionTime":"2025-11-21T15:36:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.678784 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.678845 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.678857 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.678880 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.678898 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:41Z","lastTransitionTime":"2025-11-21T15:36:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.781531 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.781586 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.781599 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.781618 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.781629 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:41Z","lastTransitionTime":"2025-11-21T15:36:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.885099 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.885489 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.885503 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.885521 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.885533 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:41Z","lastTransitionTime":"2025-11-21T15:36:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.989413 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.989479 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.989490 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.989515 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:41 crc kubenswrapper[4967]: I1121 15:36:41.989529 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:41Z","lastTransitionTime":"2025-11-21T15:36:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.092709 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.092763 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.092775 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.092793 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.092807 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:42Z","lastTransitionTime":"2025-11-21T15:36:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.195927 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.195964 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.195972 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.195988 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.195997 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:42Z","lastTransitionTime":"2025-11-21T15:36:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.298647 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.298699 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.298713 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.298732 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.298746 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:42Z","lastTransitionTime":"2025-11-21T15:36:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.401692 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.401745 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.401756 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.401777 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.401788 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:42Z","lastTransitionTime":"2025-11-21T15:36:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.505706 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.505788 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.505807 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.505836 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.505856 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:42Z","lastTransitionTime":"2025-11-21T15:36:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.535620 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:42 crc kubenswrapper[4967]: E1121 15:36:42.535800 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.535810 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:42 crc kubenswrapper[4967]: E1121 15:36:42.536066 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.556075 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68a41149-faa4-4822-b4d2-09d2461d2078\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa5cf8d5f0aa74c72abd3fe2c01372b3089066842b87cf74df4f9accde84fcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1812d7c4dec38deedb6295479db0a8c84ddd96a8e41d191b381582c01318c3ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/o
penshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67676d67141387f36bdc8d39929bc2992c37ec12d9b3c1553f8a043e30a39d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.570485 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.588910 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.611773 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.611833 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:42 crc 
kubenswrapper[4967]: I1121 15:36:42.611848 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.611896 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.611913 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:42Z","lastTransitionTime":"2025-11-21T15:36:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.612377 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.631989 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.656301 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.675758 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.690529 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.703496 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.716193 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.716261 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.716276 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.716298 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.716333 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:42Z","lastTransitionTime":"2025-11-21T15:36:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.719987 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.737976 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 
15:36:42.759678 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\
\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77325745
3265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8d45d2b6e1bb1f80ce967579185bd793f0c1dae1083720a39ae3ad3863ca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8d45d2b6e1bb1f80ce967579185bd793f0c1dae1083720a39ae3ad3863ca14d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:32Z\\\",\\\"message\\\":\\\"1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.399098 7032 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.399205 7032 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1121 15:36:32.399252 7032 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.399220 7032 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.399367 7032 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.400030 7032 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1121 15:36:32.400085 7032 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1121 15:36:32.400092 7032 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1121 15:36:32.400116 7032 factory.go:656] Stopping watch factory\\\\nI1121 15:36:32.400132 7032 ovnkube.go:599] Stopped ovnkube\\\\nI1121 15:36:32.400142 7032 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1121 15:36:32.400168 7032 metrics.go:553] Stopping metrics server at address\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:36:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-zm492_openshift-ovn-kubernetes(eeb9277d-9a26-4665-a01c-9ed1c379e8dd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.772255 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.786726 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.801047 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.815126 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.819252 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.819293 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.819305 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.819339 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.819352 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:42Z","lastTransitionTime":"2025-11-21T15:36:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.832444 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://691bc6b6db50f421eb9e82f4a113bcef24c35943fe77db1a1c5635a24de9674e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:24Z\\\",\\\"message\\\":\\\"2025-11-21T15:35:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3dcf63e2-67e0-43a7-92f1-d7292bf82bbe\\\\n2025-11-21T15:35:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3dcf63e2-67e0-43a7-92f1-d7292bf82bbe to /host/opt/cni/bin/\\\\n2025-11-21T15:35:39Z [verbose] multus-daemon started\\\\n2025-11-21T15:35:39Z [verbose] Readiness Indicator file check\\\\n2025-11-21T15:36:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:36:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:42Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.922062 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.922108 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.922124 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.922143 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:42 crc kubenswrapper[4967]: I1121 15:36:42.922155 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:42Z","lastTransitionTime":"2025-11-21T15:36:42Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.025033 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.025090 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.025109 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.025130 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.025142 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:43Z","lastTransitionTime":"2025-11-21T15:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.128460 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.128513 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.128527 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.128546 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.128559 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:43Z","lastTransitionTime":"2025-11-21T15:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.231772 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.231813 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.231824 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.231841 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.231851 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:43Z","lastTransitionTime":"2025-11-21T15:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.335414 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.335468 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.335476 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.335492 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.335502 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:43Z","lastTransitionTime":"2025-11-21T15:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.439510 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.439589 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.439606 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.439635 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.439655 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:43Z","lastTransitionTime":"2025-11-21T15:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.535388 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.535446 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:43 crc kubenswrapper[4967]: E1121 15:36:43.535557 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:43 crc kubenswrapper[4967]: E1121 15:36:43.535631 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.542654 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.542713 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.542726 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.542744 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.542760 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:43Z","lastTransitionTime":"2025-11-21T15:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.645623 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.645674 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.645689 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.645708 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.645721 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:43Z","lastTransitionTime":"2025-11-21T15:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.748410 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.748497 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.748519 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.748601 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.748634 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:43Z","lastTransitionTime":"2025-11-21T15:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.852641 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.852690 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.852701 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.852719 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.852733 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:43Z","lastTransitionTime":"2025-11-21T15:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.955373 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.955416 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.955424 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.955439 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:43 crc kubenswrapper[4967]: I1121 15:36:43.955449 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:43Z","lastTransitionTime":"2025-11-21T15:36:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.059447 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.059516 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.059529 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.059551 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.059566 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:44Z","lastTransitionTime":"2025-11-21T15:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.162300 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.162405 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.162418 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.162443 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.162457 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:44Z","lastTransitionTime":"2025-11-21T15:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.264779 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.264820 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.264831 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.264848 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.264859 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:44Z","lastTransitionTime":"2025-11-21T15:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.367304 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.367364 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.367374 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.367388 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.367397 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:44Z","lastTransitionTime":"2025-11-21T15:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.469665 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.469711 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.469724 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.469742 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.469755 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:44Z","lastTransitionTime":"2025-11-21T15:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.535719 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.535908 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:44 crc kubenswrapper[4967]: E1121 15:36:44.536091 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:44 crc kubenswrapper[4967]: E1121 15:36:44.536192 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.550732 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.573035 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.573079 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.573089 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.573108 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.573122 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:44Z","lastTransitionTime":"2025-11-21T15:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.676057 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.676112 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.676126 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.676149 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.676165 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:44Z","lastTransitionTime":"2025-11-21T15:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.779144 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.779204 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.779226 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.779245 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.779258 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:44Z","lastTransitionTime":"2025-11-21T15:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.882219 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.882285 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.882295 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.882345 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.882359 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:44Z","lastTransitionTime":"2025-11-21T15:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.986562 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.986651 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.986683 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.986720 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:44 crc kubenswrapper[4967]: I1121 15:36:44.986745 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:44Z","lastTransitionTime":"2025-11-21T15:36:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.089751 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.089896 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.089913 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.089932 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.089945 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:45Z","lastTransitionTime":"2025-11-21T15:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.193968 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.194027 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.194037 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.194058 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.194075 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:45Z","lastTransitionTime":"2025-11-21T15:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.296437 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.296479 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.296490 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.296507 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.296518 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:45Z","lastTransitionTime":"2025-11-21T15:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.399697 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.399818 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.399835 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.399855 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.399870 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:45Z","lastTransitionTime":"2025-11-21T15:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.503515 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.503572 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.503590 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.503617 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.503639 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:45Z","lastTransitionTime":"2025-11-21T15:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.536145 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.536168 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:45 crc kubenswrapper[4967]: E1121 15:36:45.536432 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:45 crc kubenswrapper[4967]: E1121 15:36:45.537051 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.537628 4967 scope.go:117] "RemoveContainer" containerID="a8d45d2b6e1bb1f80ce967579185bd793f0c1dae1083720a39ae3ad3863ca14d" Nov 21 15:36:45 crc kubenswrapper[4967]: E1121 15:36:45.537983 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-zm492_openshift-ovn-kubernetes(eeb9277d-9a26-4665-a01c-9ed1c379e8dd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.607622 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.607681 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.607695 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.607718 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.607737 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:45Z","lastTransitionTime":"2025-11-21T15:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.709997 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.710079 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.710103 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.710138 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.710165 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:45Z","lastTransitionTime":"2025-11-21T15:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.813379 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.813457 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.813483 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.813517 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.813545 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:45Z","lastTransitionTime":"2025-11-21T15:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.916171 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.916297 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.916330 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.916348 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:45 crc kubenswrapper[4967]: I1121 15:36:45.916359 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:45Z","lastTransitionTime":"2025-11-21T15:36:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.019526 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.019871 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.020007 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.020278 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.020462 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:46Z","lastTransitionTime":"2025-11-21T15:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.124275 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.124387 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.124407 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.124435 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.124456 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:46Z","lastTransitionTime":"2025-11-21T15:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.227957 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.228027 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.228045 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.228073 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.228091 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:46Z","lastTransitionTime":"2025-11-21T15:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.331906 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.331975 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.331994 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.332025 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.332047 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:46Z","lastTransitionTime":"2025-11-21T15:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.436086 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.436175 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.436192 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.436218 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.436239 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:46Z","lastTransitionTime":"2025-11-21T15:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.536001 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.536146 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:46 crc kubenswrapper[4967]: E1121 15:36:46.536226 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:46 crc kubenswrapper[4967]: E1121 15:36:46.536466 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.540061 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.540123 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.540144 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.540171 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.540190 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:46Z","lastTransitionTime":"2025-11-21T15:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.643855 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.643913 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.643929 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.643957 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.643973 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:46Z","lastTransitionTime":"2025-11-21T15:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.746639 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.746711 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.746731 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.746758 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.746773 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:46Z","lastTransitionTime":"2025-11-21T15:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.850197 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.850249 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.850259 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.850275 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.850285 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:46Z","lastTransitionTime":"2025-11-21T15:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.953596 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.953678 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.953695 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.953725 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:46 crc kubenswrapper[4967]: I1121 15:36:46.953870 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:46Z","lastTransitionTime":"2025-11-21T15:36:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.057161 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.057212 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.057221 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.057238 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.057250 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:47Z","lastTransitionTime":"2025-11-21T15:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.161216 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.161283 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.161298 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.161346 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.161368 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:47Z","lastTransitionTime":"2025-11-21T15:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.264566 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.264623 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.264635 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.264655 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.264670 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:47Z","lastTransitionTime":"2025-11-21T15:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.367425 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.367489 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.367499 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.367515 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.367529 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:47Z","lastTransitionTime":"2025-11-21T15:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.470573 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.470606 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.470616 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.470629 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.470640 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:47Z","lastTransitionTime":"2025-11-21T15:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.535613 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.535655 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:47 crc kubenswrapper[4967]: E1121 15:36:47.535805 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:47 crc kubenswrapper[4967]: E1121 15:36:47.535896 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.573391 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.573434 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.573445 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.573462 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.573475 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:47Z","lastTransitionTime":"2025-11-21T15:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.676214 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.676278 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.676295 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.676363 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.676386 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:47Z","lastTransitionTime":"2025-11-21T15:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.778966 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.779027 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.779042 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.779059 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.779070 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:47Z","lastTransitionTime":"2025-11-21T15:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.882598 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.882653 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.882666 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.882686 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.882696 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:47Z","lastTransitionTime":"2025-11-21T15:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.985245 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.985281 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.985290 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.985326 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:47 crc kubenswrapper[4967]: I1121 15:36:47.985356 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:47Z","lastTransitionTime":"2025-11-21T15:36:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.087208 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.087254 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.087265 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.087281 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.087293 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:48Z","lastTransitionTime":"2025-11-21T15:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.189883 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.189921 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.189932 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.189949 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.189961 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:48Z","lastTransitionTime":"2025-11-21T15:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.293045 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.293094 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.293105 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.293128 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.293141 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:48Z","lastTransitionTime":"2025-11-21T15:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.395526 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.395572 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.395582 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.395603 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.395613 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:48Z","lastTransitionTime":"2025-11-21T15:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.498046 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.498097 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.498107 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.498122 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.498135 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:48Z","lastTransitionTime":"2025-11-21T15:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.536020 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.536056 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:48 crc kubenswrapper[4967]: E1121 15:36:48.536221 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:48 crc kubenswrapper[4967]: E1121 15:36:48.536366 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.601450 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.601533 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.601548 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.601570 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.601583 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:48Z","lastTransitionTime":"2025-11-21T15:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.704365 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.704418 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.704427 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.704446 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.704508 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:48Z","lastTransitionTime":"2025-11-21T15:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.807657 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.807698 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.807709 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.807727 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.807736 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:48Z","lastTransitionTime":"2025-11-21T15:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.910870 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.910914 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.910929 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.910949 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:48 crc kubenswrapper[4967]: I1121 15:36:48.910962 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:48Z","lastTransitionTime":"2025-11-21T15:36:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.013467 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.013510 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.013520 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.013540 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.013551 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:49Z","lastTransitionTime":"2025-11-21T15:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.116622 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.116667 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.116681 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.116698 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.116708 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:49Z","lastTransitionTime":"2025-11-21T15:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.219631 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.219680 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.219710 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.219727 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.219738 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:49Z","lastTransitionTime":"2025-11-21T15:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.322378 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.322440 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.322451 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.322472 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.322485 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:49Z","lastTransitionTime":"2025-11-21T15:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.425700 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.425767 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.425781 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.425801 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.425816 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:49Z","lastTransitionTime":"2025-11-21T15:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.528942 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.528995 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.529006 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.529023 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.529033 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:49Z","lastTransitionTime":"2025-11-21T15:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.535638 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.535832 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:49 crc kubenswrapper[4967]: E1121 15:36:49.535912 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:49 crc kubenswrapper[4967]: E1121 15:36:49.535982 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.602684 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.602741 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.602751 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.602770 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.602781 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:49Z","lastTransitionTime":"2025-11-21T15:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:49 crc kubenswrapper[4967]: E1121 15:36:49.617841 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeByt
es\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:49Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.628660 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.628715 4967 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.628727 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.628747 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.628761 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:49Z","lastTransitionTime":"2025-11-21T15:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:49 crc kubenswrapper[4967]: E1121 15:36:49.644527 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:49Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.649614 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.649681 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.649695 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.649718 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.649757 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:49Z","lastTransitionTime":"2025-11-21T15:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:49 crc kubenswrapper[4967]: E1121 15:36:49.666047 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:49Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.671430 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.671473 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.671483 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.671501 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.671515 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:49Z","lastTransitionTime":"2025-11-21T15:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:49 crc kubenswrapper[4967]: E1121 15:36:49.686198 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:49Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.690430 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.690477 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.690493 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.690511 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.690520 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:49Z","lastTransitionTime":"2025-11-21T15:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:49 crc kubenswrapper[4967]: E1121 15:36:49.702645 4967 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:49Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aa4afe05-81a7-4f49-b297-5ea173e99e75\\\",\\\"systemUUID\\\":\\\"edc01b1f-a566-4bc8-990a-79924b529553\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:49Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:49 crc kubenswrapper[4967]: E1121 15:36:49.702762 4967 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.704709 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.704752 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.704762 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.704780 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.704792 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:49Z","lastTransitionTime":"2025-11-21T15:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.808106 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.808154 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.808162 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.808180 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.808191 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:49Z","lastTransitionTime":"2025-11-21T15:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.911055 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.911107 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.911125 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.911152 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:49 crc kubenswrapper[4967]: I1121 15:36:49.911171 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:49Z","lastTransitionTime":"2025-11-21T15:36:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.015224 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.015364 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.015387 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.015417 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.015439 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:50Z","lastTransitionTime":"2025-11-21T15:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.117613 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.117675 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.117686 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.117706 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.117718 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:50Z","lastTransitionTime":"2025-11-21T15:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.221284 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.221389 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.221400 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.221418 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.221430 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:50Z","lastTransitionTime":"2025-11-21T15:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.324080 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.324140 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.324151 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.324167 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.324177 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:50Z","lastTransitionTime":"2025-11-21T15:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.427171 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.427221 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.427231 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.427251 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.427262 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:50Z","lastTransitionTime":"2025-11-21T15:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.529936 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.530002 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.530024 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.530054 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.530075 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:50Z","lastTransitionTime":"2025-11-21T15:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.536361 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.536454 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:50 crc kubenswrapper[4967]: E1121 15:36:50.536505 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:50 crc kubenswrapper[4967]: E1121 15:36:50.536652 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.633548 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.633611 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.633630 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.633656 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.633669 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:50Z","lastTransitionTime":"2025-11-21T15:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.736526 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.736587 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.736597 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.736611 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.736622 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:50Z","lastTransitionTime":"2025-11-21T15:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.839171 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.839225 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.839237 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.839258 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.839269 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:50Z","lastTransitionTime":"2025-11-21T15:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.942720 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.942786 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.942799 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.942819 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:50 crc kubenswrapper[4967]: I1121 15:36:50.942832 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:50Z","lastTransitionTime":"2025-11-21T15:36:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.045972 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.046009 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.046019 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.046036 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.046046 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:51Z","lastTransitionTime":"2025-11-21T15:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.148379 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.148466 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.148481 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.148512 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.148530 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:51Z","lastTransitionTime":"2025-11-21T15:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.251716 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.251772 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.251784 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.251806 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.251819 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:51Z","lastTransitionTime":"2025-11-21T15:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.354231 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.354296 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.354312 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.354376 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.354389 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:51Z","lastTransitionTime":"2025-11-21T15:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.457018 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.457094 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.457106 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.457123 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.457137 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:51Z","lastTransitionTime":"2025-11-21T15:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.535357 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.535484 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:36:51 crc kubenswrapper[4967]: E1121 15:36:51.535622 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:36:51 crc kubenswrapper[4967]: E1121 15:36:51.535811 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.560362 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.560428 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.560442 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.560465 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.560481 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:51Z","lastTransitionTime":"2025-11-21T15:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.662722 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.662790 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.662800 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.662820 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.662841 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:51Z","lastTransitionTime":"2025-11-21T15:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.765520 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.765571 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.765580 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.765598 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.765611 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:51Z","lastTransitionTime":"2025-11-21T15:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.867907 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.867960 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.867974 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.867993 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.868003 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:51Z","lastTransitionTime":"2025-11-21T15:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.970901 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.970973 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.970988 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.971011 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:51 crc kubenswrapper[4967]: I1121 15:36:51.971029 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:51Z","lastTransitionTime":"2025-11-21T15:36:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.074174 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.074245 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.074262 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.074282 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.074297 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:52Z","lastTransitionTime":"2025-11-21T15:36:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.177385 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.177465 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.177483 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.177511 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.177532 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:52Z","lastTransitionTime":"2025-11-21T15:36:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.280081 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.280145 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.280161 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.280185 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.280204 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:52Z","lastTransitionTime":"2025-11-21T15:36:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.382942 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.383005 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.383018 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.383037 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.383049 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:52Z","lastTransitionTime":"2025-11-21T15:36:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.485453 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.485506 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.485518 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.485537 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.485550 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:52Z","lastTransitionTime":"2025-11-21T15:36:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.536131 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.536332 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:36:52 crc kubenswrapper[4967]: E1121 15:36:52.536570 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:52 crc kubenswrapper[4967]: E1121 15:36:52.536779 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.552164 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"68a41149-faa4-4822-b4d2-09d2461d2078\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faa5cf8d5f0aa74c72abd3fe2c01372b3089066842b87cf74df4f9accde84fcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1812d7c4dec38deedb6295479db0a8c84ddd96a8e41d191b381582c01318c3ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://67676d67141387f36bdc8d39929bc2992c37ec12d9b3c1553f8a043e30a39d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c90e0e785fa2f72ffa703a0698e565bd1c10166d1fddd0b2123d61c6fdaddeb6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.564833 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-c8qfd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"62aa85b6-33c6-4631-8877-e4d4f4f8bb16\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4f1d7d6bc3810a30c2624b49e3797ce2c423ae45b15cbde9e18613ddb203494\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r49vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-c8qfd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.582194 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3086bf03-8158-4314-9f35-b08d53a14758\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://774280438f157e06c72041b036a123ad3eace48093b3c441389e3f425a029e6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://afff78c1fb41ebd1a12558b1184530d167b00b74991b4e3ce5ec14237093bd0f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://128f1613a27fc5baa3993fdd0830890a2ac34928b4bd14a8f1e66310140108b5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8cd6392682fed3a23264ea80523f25d8c5715d79dd81cde584a4c3e8520b32e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f52820abc9c7f94e9a51ea2bff8d5106b3eda85c3226377d01f666e505636f3d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"file observer\\\\nW1121 15:35:31.331571 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 15:35:31.331780 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 15:35:31.333148 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-900085266/tls.crt::/tmp/serving-cert-900085266/tls.key\\\\\\\"\\\\nI1121 15:35:31.868270 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 15:35:31.883434 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 15:35:31.883470 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 15:35:31.883503 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 15:35:31.883509 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 15:35:31.895717 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 15:35:31.895749 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895756 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 15:35:31.895762 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 15:35:31.895765 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 15:35:31.895768 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 15:35:31.895771 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 15:35:31.895789 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 15:35:31.906777 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:17Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7acaccaa321b54a23fb6e3035b2bc94b51c094e55f61dd4d9422d5c919d69cb6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:15Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2feaf3265c6921a7137ac496ab8bd2d07924154356eb36516de9cd035296c20f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.595039 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.595090 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.595103 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.595121 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.595131 4967 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:52Z","lastTransitionTime":"2025-11-21T15:36:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.599833 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3648ec0b7cab784afa18c36d62320445c369f793afbed2b06a13833a445dd4dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.617355 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.631626 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64939186037f7c926ae94d12527527670e892f6d464a6ac6f32731ebd1247949\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.642162 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-8srmv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9af74498-5bb0-49a3-bf13-2ea73a127539\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e727062c6641f2612d3f4e2e78b63f23a6dd7a6e19a8dd41ff46652f880c1a26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ktz85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:36Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-8srmv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.657054 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8f12a156-8db0-49be-a048-e7c4988f9cd0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://46e44dae28700f238349c317f5a18c48f4bbab172fddb076fd5748ec82561663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7ljns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-lrth2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.672995 4967 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jpln7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"301ed826-105f-43b3-b553-38186c8cc1be\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc6b8fd191040fcde162739df62ea02dd350c38557b04ba5abaf208479a7dc3e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://85496bd0202b78443921de0a43e2603c5caee3aff1167096b183949963731c36\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://516927adac4659f7096db4fb5f052286f8ce0117e6cc1ff5218a972660907225\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:40Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c372ddac9b42f9ace5d5cb3ca6e4a15e86ec664132c74f0e6b7c64215d60915e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://03146c99f984c0c976b4fa80874164074e4707d5a9fc5e0c700df42cc6dc6482\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88691f0058d74849a7e3f2031dd9d78fef011ebded72379f2434e607f3cc1545\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://359636d2ae0766f9e521228951702e64a42d403d50790fa061b71097d729a313\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jssfh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jpln7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.687641 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e413228d-eaa3-45fb-8adf-35e0054bf53c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tnwnx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:50Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-kj7qv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.698729 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.698773 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.698784 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.698803 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.698814 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:52Z","lastTransitionTime":"2025-11-21T15:36:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.702931 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fc0e38d4a5be2d187f293f81ab71cb72a58ff3a2a2359c67614d939dd3b0316b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0bf6d18c8b48a9b0e6de42d76f77f8d5ed7398a0608c97f98c443d31c1add15f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.728360 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\
\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/se
rviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a8d45d2b6e1bb1f80ce967579185bd793f0c1dae1083720a39ae3ad3863ca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a8d45d2b6e1bb1f80ce967579185bd793f0c1dae1083720a39ae3ad3863ca14d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:32Z\\\",\\\"message\\\":\\\"1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.399098 7032 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.399205 7032 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1121 15:36:32.399252 7032 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.399220 7032 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.399367 7032 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1121 15:36:32.400030 7032 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1121 15:36:32.400085 7032 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1121 15:36:32.400092 7032 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1121 15:36:32.400116 7032 factory.go:656] Stopping watch factory\\\\nI1121 15:36:32.400132 7032 ovnkube.go:599] Stopped ovnkube\\\\nI1121 15:36:32.400142 7032 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1121 15:36:32.400168 7032 
metrics.go:553] Stopping metrics server at address\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:36:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-zm492_openshift-ovn-kubernetes(eeb9277d-9a26-4665-a01c-9ed1c379e8dd)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acces
s-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wvcsc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-zm492\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.741411 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"77051baa-16c8-41bf-98e3-b6dbd8f19ccf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://676c95cb57812034e79459f98d501941a5b31d61ac91866badff8869a367711b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ffb7b3741903cd837195627440e1c4436f00b155998f350b4783615ade3a0bcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffb7b3741903cd837195627440e1c4436f00b155998f350b4783615ade3a0bcb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T15:35:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.756456 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa9c89e6-8018-472f-9bb7-fa96ed442edc\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ac22327094d519fbcab9c73fc96cf57469a6937657b8ffd25e40dcc068be056c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7e97d7aca77762917f22ee987355f8a51ee1c78c43ebce4579f7e9450a21abcb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9230aa95463474dc20f6da55196881aa91325fb949e8fbcfef11fa5f4f8c9090\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f45033e2d34f7b87ed3df926bee0dc8fd2ab79407e9ddf83466c0bb6b1826f97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:12Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.772342 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.787248 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:31Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.800900 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.800961 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.800973 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.800992 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.801006 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:52Z","lastTransitionTime":"2025-11-21T15:36:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.802255 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-j4dcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"629a5f41-3cd8-4518-a833-2832f4ebe55a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:36:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://691bc6b6db50f421eb9e82f4a113bcef24c35943fe77db1a1c5635a24de9674e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T15:36:24Z\\\",\\\"message\\\":\\\"2025-11-21T15:35:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_3dcf63e2-67e0-43a7-92f1-d7292bf82bbe\\\\n2025-11-21T15:35:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_3dcf63e2-67e0-43a7-92f1-d7292bf82bbe to /host/opt/cni/bin/\\\\n2025-11-21T15:35:39Z [verbose] multus-daemon started\\\\n2025-11-21T15:35:39Z [verbose] Readiness Indicator file check\\\\n2025-11-21T15:36:24Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T15:35:37Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:36:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jv5wt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:37Z\\\"}}\" for pod \"openshift-multus\"/\"multus-j4dcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:52Z is after 2025-08-24T17:21:41Z" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.816834 4967 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69ff522a-c497-426d-9af8-5afbdb04dc0b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T15:35:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://35ae41a962dd5c84878f0e62b1ffcda33b472b4b3cb5e0640dd0225c248e91ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef0b21a6b8e31c45be0bffd76d312f5cf239fd00026e115c5df84dfd8feccea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T15:35:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzdls\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T15:35:49Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-79w5v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T15:36:52Z is after 2025-08-24T17:21:41Z" Nov 21 
15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.904464 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.904525 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.904540 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.904570 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:52 crc kubenswrapper[4967]: I1121 15:36:52.904586 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:52Z","lastTransitionTime":"2025-11-21T15:36:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.007370 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.007426 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.007439 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.007465 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.007480 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:53Z","lastTransitionTime":"2025-11-21T15:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.110605 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.110662 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.110680 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.110702 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.110718 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:53Z","lastTransitionTime":"2025-11-21T15:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.213188 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.213241 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.213253 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.213270 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.213284 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:53Z","lastTransitionTime":"2025-11-21T15:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.316593 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.317008 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.317100 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.317195 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.317299 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:53Z","lastTransitionTime":"2025-11-21T15:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.420197 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.420854 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.420988 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.421157 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.421536 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:53Z","lastTransitionTime":"2025-11-21T15:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.525148 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.525220 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.525233 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.525253 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.525267 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:53Z","lastTransitionTime":"2025-11-21T15:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.535976 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.536032 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 21 15:36:53 crc kubenswrapper[4967]: E1121 15:36:53.536547 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 21 15:36:53 crc kubenswrapper[4967]: E1121 15:36:53.536746 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.627332 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.627386 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.627397 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.627450 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.627466 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:53Z","lastTransitionTime":"2025-11-21T15:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.729485 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.729559 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.729571 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.729589 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.729600 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:53Z","lastTransitionTime":"2025-11-21T15:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.832102 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.832177 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.832189 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.832208 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.832221 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:53Z","lastTransitionTime":"2025-11-21T15:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.933851 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.933884 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.933893 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.933908 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:53 crc kubenswrapper[4967]: I1121 15:36:53.933916 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:53Z","lastTransitionTime":"2025-11-21T15:36:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.036797 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.036833 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.036841 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.036856 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.036865 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:54Z","lastTransitionTime":"2025-11-21T15:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.139722 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.139771 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.139781 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.139801 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.139811 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:54Z","lastTransitionTime":"2025-11-21T15:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.242016 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.242068 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.242084 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.242109 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.242124 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:54Z","lastTransitionTime":"2025-11-21T15:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.345017 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.345060 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.345069 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.345086 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.345096 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:54Z","lastTransitionTime":"2025-11-21T15:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.447598 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.447652 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.447665 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.447685 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.447699 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:54Z","lastTransitionTime":"2025-11-21T15:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.535850 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.535932 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv"
Nov 21 15:36:54 crc kubenswrapper[4967]: E1121 15:36:54.536058 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 21 15:36:54 crc kubenswrapper[4967]: E1121 15:36:54.536182 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.550175 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.550225 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.550238 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.550256 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.550268 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:54Z","lastTransitionTime":"2025-11-21T15:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.653697 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.653740 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.653749 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.653765 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.653777 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:54Z","lastTransitionTime":"2025-11-21T15:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.719398 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs\") pod \"network-metrics-daemon-kj7qv\" (UID: \"e413228d-eaa3-45fb-8adf-35e0054bf53c\") " pod="openshift-multus/network-metrics-daemon-kj7qv"
Nov 21 15:36:54 crc kubenswrapper[4967]: E1121 15:36:54.719619 4967 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 21 15:36:54 crc kubenswrapper[4967]: E1121 15:36:54.719720 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs podName:e413228d-eaa3-45fb-8adf-35e0054bf53c nodeName:}" failed. No retries permitted until 2025-11-21 15:37:58.719700725 +0000 UTC m=+166.978221733 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs") pod "network-metrics-daemon-kj7qv" (UID: "e413228d-eaa3-45fb-8adf-35e0054bf53c") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.755115 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.755143 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.755152 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.755167 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.755178 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:54Z","lastTransitionTime":"2025-11-21T15:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.857547 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.857596 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.857608 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.857627 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.857639 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:54Z","lastTransitionTime":"2025-11-21T15:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.960927 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.960990 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.961004 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.961025 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:54 crc kubenswrapper[4967]: I1121 15:36:54.961037 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:54Z","lastTransitionTime":"2025-11-21T15:36:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.064758 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.064846 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.064882 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.064921 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.064945 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:55Z","lastTransitionTime":"2025-11-21T15:36:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.167688 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.167746 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.167764 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.167786 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.167798 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:55Z","lastTransitionTime":"2025-11-21T15:36:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.271486 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.271539 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.271550 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.271569 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.271580 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:55Z","lastTransitionTime":"2025-11-21T15:36:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.373967 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.374020 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.374033 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.374051 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.374064 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:55Z","lastTransitionTime":"2025-11-21T15:36:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.477456 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.477516 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.477537 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.477567 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.477583 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:55Z","lastTransitionTime":"2025-11-21T15:36:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.535763 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.535763 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 21 15:36:55 crc kubenswrapper[4967]: E1121 15:36:55.535995 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 21 15:36:55 crc kubenswrapper[4967]: E1121 15:36:55.536045 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.580329 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.580383 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.580396 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.580419 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.580436 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:55Z","lastTransitionTime":"2025-11-21T15:36:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.683134 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.683173 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.683185 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.683204 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.683216 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:55Z","lastTransitionTime":"2025-11-21T15:36:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.786509 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.786558 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.786572 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.786596 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.786613 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:55Z","lastTransitionTime":"2025-11-21T15:36:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.890066 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.890392 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.890446 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.890474 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.890488 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:55Z","lastTransitionTime":"2025-11-21T15:36:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.994357 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.994419 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.994429 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.994463 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:55 crc kubenswrapper[4967]: I1121 15:36:55.994475 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:55Z","lastTransitionTime":"2025-11-21T15:36:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.098703 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.098777 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.098794 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.098824 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.098853 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:56Z","lastTransitionTime":"2025-11-21T15:36:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.202242 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.202291 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.202303 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.202342 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.202353 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:56Z","lastTransitionTime":"2025-11-21T15:36:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.304591 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.304643 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.304661 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.304680 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.304690 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:56Z","lastTransitionTime":"2025-11-21T15:36:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.407594 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.407639 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.407648 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.407665 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.407676 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:56Z","lastTransitionTime":"2025-11-21T15:36:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.509680 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.509734 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.509744 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.509761 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.509775 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:56Z","lastTransitionTime":"2025-11-21T15:36:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.535635 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.535976 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv"
Nov 21 15:36:56 crc kubenswrapper[4967]: E1121 15:36:56.536151 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 21 15:36:56 crc kubenswrapper[4967]: E1121 15:36:56.536305 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c"
Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.549221 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"]
Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.611986 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.612038 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.612049 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.612072 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.612085 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:56Z","lastTransitionTime":"2025-11-21T15:36:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.715270 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.715368 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.715382 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.715406 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.715419 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:56Z","lastTransitionTime":"2025-11-21T15:36:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.817843 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.817918 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.817931 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.817950 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.817960 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:56Z","lastTransitionTime":"2025-11-21T15:36:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.920812 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.920884 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.920906 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.920961 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:56 crc kubenswrapper[4967]: I1121 15:36:56.920997 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:56Z","lastTransitionTime":"2025-11-21T15:36:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.024285 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.024366 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.024379 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.024400 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.024434 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:57Z","lastTransitionTime":"2025-11-21T15:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.126941 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.127001 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.127012 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.127034 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.127048 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:57Z","lastTransitionTime":"2025-11-21T15:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.230142 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.230202 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.230211 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.230230 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.230238 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:57Z","lastTransitionTime":"2025-11-21T15:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.333162 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.333220 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.333232 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.333249 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.333263 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:57Z","lastTransitionTime":"2025-11-21T15:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.435822 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.435870 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.435882 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.435902 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.435914 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:57Z","lastTransitionTime":"2025-11-21T15:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.535935 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 21 15:36:57 crc kubenswrapper[4967]: E1121 15:36:57.536070 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.535936 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 21 15:36:57 crc kubenswrapper[4967]: E1121 15:36:57.536240 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.538143 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.538202 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.538213 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.538235 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.538247 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:57Z","lastTransitionTime":"2025-11-21T15:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.641494 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.641546 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.641556 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.641578 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.641588 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:57Z","lastTransitionTime":"2025-11-21T15:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
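
The container runtime keeps reporting NetworkReady=false until a CNI configuration appears in /etc/kubernetes/cni/net.d/; on this cluster that file is generated by multus/OVN-Kubernetes once the ovnkube pods recover, so nothing should be hand-written there. Purely to illustrate the shape of the file being polled for, here is a generic bridge/host-local .conflist written to a scratch directory (the network name, subnet, and paths are illustrative assumptions, not what OpenShift generates):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// A generic CNI .conflist of the kind the runtime polls for in
// /etc/kubernetes/cni/net.d/. Illustrative only; do not hand-create
// one on an OpenShift node.
const conflist = `{
  "cniVersion": "0.4.0",
  "name": "example-net",
  "plugins": [
    {
      "type": "bridge",
      "bridge": "cni0",
      "ipam": { "type": "host-local", "subnet": "10.88.0.0/16" }
    }
  ]
}`

func main() {
	dir := filepath.Join(os.TempDir(), "net.d") // scratch dir, not the live one
	if err := os.MkdirAll(dir, 0o755); err != nil {
		panic(err)
	}
	path := filepath.Join(dir, "10-example.conflist")
	if err := os.WriteFile(path, []byte(conflist), 0o644); err != nil {
		panic(err)
	}
	fmt.Println("wrote", path)
}
```
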
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.744772 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.744820 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.744839 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.744856 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.744870 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:57Z","lastTransitionTime":"2025-11-21T15:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.847940 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.848000 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.848011 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.848031 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.848044 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:57Z","lastTransitionTime":"2025-11-21T15:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.950354 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.950401 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.950410 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.950429 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:57 crc kubenswrapper[4967]: I1121 15:36:57.950440 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:57Z","lastTransitionTime":"2025-11-21T15:36:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.053733 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.053781 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.053795 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.053814 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.053825 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:58Z","lastTransitionTime":"2025-11-21T15:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.156274 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.156382 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.156392 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.156415 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.156425 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:58Z","lastTransitionTime":"2025-11-21T15:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.259180 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.259238 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.259250 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.259271 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.259285 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:58Z","lastTransitionTime":"2025-11-21T15:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.363131 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.363201 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.363237 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.363273 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.363296 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:58Z","lastTransitionTime":"2025-11-21T15:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.465485 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.465536 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.465565 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.465587 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.465604 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:58Z","lastTransitionTime":"2025-11-21T15:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.536237 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.536384 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv"
Nov 21 15:36:58 crc kubenswrapper[4967]: E1121 15:36:58.536467 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:36:58 crc kubenswrapper[4967]: E1121 15:36:58.536631 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.541758 4967 scope.go:117] "RemoveContainer" containerID="a8d45d2b6e1bb1f80ce967579185bd793f0c1dae1083720a39ae3ad3863ca14d" Nov 21 15:36:58 crc kubenswrapper[4967]: E1121 15:36:58.543278 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-zm492_openshift-ovn-kubernetes(eeb9277d-9a26-4665-a01c-9ed1c379e8dd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.826652 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.826730 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.826745 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.826767 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.826803 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:58Z","lastTransitionTime":"2025-11-21T15:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.930132 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.930209 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.930222 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.930247 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 15:36:58 crc kubenswrapper[4967]: I1121 15:36:58.930277 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:58Z","lastTransitionTime":"2025-11-21T15:36:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.033796 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.033866 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.033883 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.033907 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.033922 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:59Z","lastTransitionTime":"2025-11-21T15:36:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.136276 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.136353 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.136367 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.136387 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.136399 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:59Z","lastTransitionTime":"2025-11-21T15:36:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.239229 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.239283 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.239294 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.239312 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.239481 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:59Z","lastTransitionTime":"2025-11-21T15:36:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.341946 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.342007 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.342018 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.342036 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.342051 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:59Z","lastTransitionTime":"2025-11-21T15:36:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.444000 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.444079 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.444117 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.444136 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.444148 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:59Z","lastTransitionTime":"2025-11-21T15:36:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.535224 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.535345 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 21 15:36:59 crc kubenswrapper[4967]: E1121 15:36:59.535400 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 21 15:36:59 crc kubenswrapper[4967]: E1121 15:36:59.535519 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.546368 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.546434 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.546458 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.546488 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.546510 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:59Z","lastTransitionTime":"2025-11-21T15:36:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.650167 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.650231 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.650244 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.650273 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.650286 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:59Z","lastTransitionTime":"2025-11-21T15:36:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.744060 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.744111 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.744123 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.744143 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.744157 4967 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T15:36:59Z","lastTransitionTime":"2025-11-21T15:36:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.810812 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8"]
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.811335 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.815057 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.815924 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.815980 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.818658 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.845102 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=60.845082405 podStartE2EDuration="1m0.845082405s" podCreationTimestamp="2025-11-21 15:35:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:36:59.830343266 +0000 UTC m=+108.088864274" watchObservedRunningTime="2025-11-21 15:36:59.845082405 +0000 UTC m=+108.103603413"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.859098 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-c8qfd" podStartSLOduration=83.859051403 podStartE2EDuration="1m23.859051403s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:36:59.846059882 +0000 UTC m=+108.104580920" watchObservedRunningTime="2025-11-21 15:36:59.859051403 +0000 UTC m=+108.117572411"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.876168 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=87.876142298 podStartE2EDuration="1m27.876142298s" podCreationTimestamp="2025-11-21 15:35:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:36:59.875757297 +0000 UTC m=+108.134278325" watchObservedRunningTime="2025-11-21 15:36:59.876142298 +0000 UTC m=+108.134663326"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.880086 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/432ec0a6-2f5f-47f9-989a-8d142313466e-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-d9zs8\" (UID: \"432ec0a6-2f5f-47f9-989a-8d142313466e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.880119 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/432ec0a6-2f5f-47f9-989a-8d142313466e-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-d9zs8\" (UID: \"432ec0a6-2f5f-47f9-989a-8d142313466e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.880159 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/432ec0a6-2f5f-47f9-989a-8d142313466e-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-d9zs8\" (UID: \"432ec0a6-2f5f-47f9-989a-8d142313466e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.880183 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/432ec0a6-2f5f-47f9-989a-8d142313466e-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-d9zs8\" (UID: \"432ec0a6-2f5f-47f9-989a-8d142313466e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.880246 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/432ec0a6-2f5f-47f9-989a-8d142313466e-service-ca\") pod \"cluster-version-operator-5c965bbfc6-d9zs8\" (UID: \"432ec0a6-2f5f-47f9-989a-8d142313466e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.935743 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-8srmv" podStartSLOduration=83.935718373 podStartE2EDuration="1m23.935718373s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:36:59.934305763 +0000 UTC m=+108.192826771" watchObservedRunningTime="2025-11-21 15:36:59.935718373 +0000 UTC m=+108.194239381"
Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.965212 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podStartSLOduration=83.965186451 podStartE2EDuration="1m23.965186451s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:36:59.948898259 +0000 UTC m=+108.207419257" watchObservedRunningTime="2025-11-21 15:36:59.965186451 +0000 UTC m=+108.223707459"
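
In the pod_startup_latency_tracker lines, podStartSLOduration lines up with the watch-observed running time minus podCreationTimestamp; with no image pulls recorded, the pulling timestamps stay at Go's zero time (0001-01-01) and the SLO and E2E durations coincide. Checking one entry (node-ca-c8qfd) by hand, with values copied from the log:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Values copied from the node-ca-c8qfd line above.
	created, _ := time.Parse(time.RFC3339, "2025-11-21T15:35:36Z")
	watched, _ := time.Parse(time.RFC3339Nano, "2025-11-21T15:36:59.859051403Z")
	// Prints 1m23.859051403s, i.e. podStartSLOduration=83.859051403.
	fmt.Println(watched.Sub(created))
}
```
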
pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podStartSLOduration=83.965186451 podStartE2EDuration="1m23.965186451s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:36:59.948898259 +0000 UTC m=+108.207419257" watchObservedRunningTime="2025-11-21 15:36:59.965186451 +0000 UTC m=+108.223707459" Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.965625 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-jpln7" podStartSLOduration=83.965620153 podStartE2EDuration="1m23.965620153s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:36:59.965414888 +0000 UTC m=+108.223935916" watchObservedRunningTime="2025-11-21 15:36:59.965620153 +0000 UTC m=+108.224141161" Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.981602 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/432ec0a6-2f5f-47f9-989a-8d142313466e-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-d9zs8\" (UID: \"432ec0a6-2f5f-47f9-989a-8d142313466e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8" Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.981667 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/432ec0a6-2f5f-47f9-989a-8d142313466e-service-ca\") pod \"cluster-version-operator-5c965bbfc6-d9zs8\" (UID: \"432ec0a6-2f5f-47f9-989a-8d142313466e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8" Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.981699 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/432ec0a6-2f5f-47f9-989a-8d142313466e-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-d9zs8\" (UID: \"432ec0a6-2f5f-47f9-989a-8d142313466e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8" Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.981729 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/432ec0a6-2f5f-47f9-989a-8d142313466e-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-d9zs8\" (UID: \"432ec0a6-2f5f-47f9-989a-8d142313466e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8" Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.981753 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/432ec0a6-2f5f-47f9-989a-8d142313466e-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-d9zs8\" (UID: \"432ec0a6-2f5f-47f9-989a-8d142313466e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8" Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.981788 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/432ec0a6-2f5f-47f9-989a-8d142313466e-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-d9zs8\" (UID: 
\"432ec0a6-2f5f-47f9-989a-8d142313466e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8" Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.981816 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/432ec0a6-2f5f-47f9-989a-8d142313466e-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-d9zs8\" (UID: \"432ec0a6-2f5f-47f9-989a-8d142313466e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8" Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.982584 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/432ec0a6-2f5f-47f9-989a-8d142313466e-service-ca\") pod \"cluster-version-operator-5c965bbfc6-d9zs8\" (UID: \"432ec0a6-2f5f-47f9-989a-8d142313466e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8" Nov 21 15:36:59 crc kubenswrapper[4967]: I1121 15:36:59.998023 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/432ec0a6-2f5f-47f9-989a-8d142313466e-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-d9zs8\" (UID: \"432ec0a6-2f5f-47f9-989a-8d142313466e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8" Nov 21 15:37:00 crc kubenswrapper[4967]: I1121 15:37:00.000624 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/432ec0a6-2f5f-47f9-989a-8d142313466e-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-d9zs8\" (UID: \"432ec0a6-2f5f-47f9-989a-8d142313466e\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8" Nov 21 15:37:00 crc kubenswrapper[4967]: I1121 15:37:00.039909 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=16.039889316 podStartE2EDuration="16.039889316s" podCreationTimestamp="2025-11-21 15:36:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:00.039342041 +0000 UTC m=+108.297863049" watchObservedRunningTime="2025-11-21 15:37:00.039889316 +0000 UTC m=+108.298410324" Nov 21 15:37:00 crc kubenswrapper[4967]: I1121 15:37:00.090433 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=4.090403169 podStartE2EDuration="4.090403169s" podCreationTimestamp="2025-11-21 15:36:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:00.068914452 +0000 UTC m=+108.327435470" watchObservedRunningTime="2025-11-21 15:37:00.090403169 +0000 UTC m=+108.348924177" Nov 21 15:37:00 crc kubenswrapper[4967]: I1121 15:37:00.090849 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=86.090844732 podStartE2EDuration="1m26.090844732s" podCreationTimestamp="2025-11-21 15:35:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:00.089870945 +0000 UTC m=+108.348391953" watchObservedRunningTime="2025-11-21 15:37:00.090844732 +0000 UTC m=+108.349365750" Nov 21 15:37:00 crc 
kubenswrapper[4967]: I1121 15:37:00.133302 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8" Nov 21 15:37:00 crc kubenswrapper[4967]: W1121 15:37:00.150118 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod432ec0a6_2f5f_47f9_989a_8d142313466e.slice/crio-523a3d3141c18c68aa34c1fd7c4d40820796b9fe364bafda70000ca3ddc9955d WatchSource:0}: Error finding container 523a3d3141c18c68aa34c1fd7c4d40820796b9fe364bafda70000ca3ddc9955d: Status 404 returned error can't find the container with id 523a3d3141c18c68aa34c1fd7c4d40820796b9fe364bafda70000ca3ddc9955d Nov 21 15:37:00 crc kubenswrapper[4967]: I1121 15:37:00.159225 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-79w5v" podStartSLOduration=84.15920278 podStartE2EDuration="1m24.15920278s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:00.158975174 +0000 UTC m=+108.417496182" watchObservedRunningTime="2025-11-21 15:37:00.15920278 +0000 UTC m=+108.417723788" Nov 21 15:37:00 crc kubenswrapper[4967]: I1121 15:37:00.159664 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-j4dcx" podStartSLOduration=84.159659433 podStartE2EDuration="1m24.159659433s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:00.144622156 +0000 UTC m=+108.403143164" watchObservedRunningTime="2025-11-21 15:37:00.159659433 +0000 UTC m=+108.418180441" Nov 21 15:37:00 crc kubenswrapper[4967]: I1121 15:37:00.535375 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:37:00 crc kubenswrapper[4967]: I1121 15:37:00.535375 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:37:00 crc kubenswrapper[4967]: E1121 15:37:00.535964 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:37:00 crc kubenswrapper[4967]: E1121 15:37:00.536015 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
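
All of these kubenswrapper entries use klog's header layout: a severity letter (I/W/E), MMDD, wall-clock time with microseconds, a thread/PID field, then file:line] and the message. A small sketch of splitting those fields apart; the regular expression here is ours, not part of klog:

```go
package main

import (
	"fmt"
	"regexp"
)

// klog header: Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
var klogHeader = regexp.MustCompile(
	`([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d{6})\s+(\d+) ([\w.]+:\d+)\] (.*)`)

func main() {
	line := `I1121 15:37:00.133302 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8"`
	m := klogHeader.FindStringSubmatch(line)
	if m == nil {
		panic("no match")
	}
	fmt.Println("severity:", m[1]) // I
	fmt.Println("date mmdd:", m[2]) // 1121
	fmt.Println("time:", m[3])      // 15:37:00.133302
	fmt.Println("pid:", m[4])       // 4967
	fmt.Println("source:", m[5])    // util.go:30
	fmt.Println("message:", m[6])
}
```
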
pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:37:01 crc kubenswrapper[4967]: I1121 15:37:01.141010 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8" event={"ID":"432ec0a6-2f5f-47f9-989a-8d142313466e","Type":"ContainerStarted","Data":"123cf8369265720dc8491056b116fdb000b99d6f2921dd31568640e316ee09d1"} Nov 21 15:37:01 crc kubenswrapper[4967]: I1121 15:37:01.141079 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8" event={"ID":"432ec0a6-2f5f-47f9-989a-8d142313466e","Type":"ContainerStarted","Data":"523a3d3141c18c68aa34c1fd7c4d40820796b9fe364bafda70000ca3ddc9955d"} Nov 21 15:37:01 crc kubenswrapper[4967]: I1121 15:37:01.158247 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-d9zs8" podStartSLOduration=85.15821662 podStartE2EDuration="1m25.15821662s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:01.157745117 +0000 UTC m=+109.416266125" watchObservedRunningTime="2025-11-21 15:37:01.15821662 +0000 UTC m=+109.416737628" Nov 21 15:37:01 crc kubenswrapper[4967]: I1121 15:37:01.535501 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:01 crc kubenswrapper[4967]: I1121 15:37:01.536002 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:37:01 crc kubenswrapper[4967]: E1121 15:37:01.536144 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:37:01 crc kubenswrapper[4967]: E1121 15:37:01.536279 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:37:02 crc kubenswrapper[4967]: I1121 15:37:02.536386 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:37:02 crc kubenswrapper[4967]: I1121 15:37:02.536417 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:37:02 crc kubenswrapper[4967]: E1121 15:37:02.537799 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:37:02 crc kubenswrapper[4967]: E1121 15:37:02.537915 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:37:03 crc kubenswrapper[4967]: I1121 15:37:03.536253 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:03 crc kubenswrapper[4967]: I1121 15:37:03.536383 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:37:03 crc kubenswrapper[4967]: E1121 15:37:03.536445 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:37:03 crc kubenswrapper[4967]: E1121 15:37:03.536551 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:37:04 crc kubenswrapper[4967]: I1121 15:37:04.536261 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:37:04 crc kubenswrapper[4967]: E1121 15:37:04.536964 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:37:04 crc kubenswrapper[4967]: I1121 15:37:04.537391 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:37:04 crc kubenswrapper[4967]: E1121 15:37:04.537531 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:37:05 crc kubenswrapper[4967]: I1121 15:37:05.535940 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:37:05 crc kubenswrapper[4967]: I1121 15:37:05.535975 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:05 crc kubenswrapper[4967]: E1121 15:37:05.536548 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:37:05 crc kubenswrapper[4967]: E1121 15:37:05.536656 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:37:06 crc kubenswrapper[4967]: I1121 15:37:06.536093 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:37:06 crc kubenswrapper[4967]: I1121 15:37:06.536112 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:37:06 crc kubenswrapper[4967]: E1121 15:37:06.536296 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:37:06 crc kubenswrapper[4967]: E1121 15:37:06.536501 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:37:07 crc kubenswrapper[4967]: I1121 15:37:07.537276 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:07 crc kubenswrapper[4967]: E1121 15:37:07.538353 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:37:07 crc kubenswrapper[4967]: I1121 15:37:07.539086 4967 util.go:30] "No sandbox for pod can be found. 
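
While the network stays down, the pod workers retry sandbox creation for each stuck pod on a steady cadence; for network-check-target-xd92c the attempts above land at 15:36:57, :36:59, :37:01, :37:03, :37:05, and :37:07, roughly two seconds apart. A quick check of that spacing using the logged timestamps:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Sync-attempt timestamps for network-check-target-xd92c, copied from
	// the util.go:30 lines above.
	stamps := []string{
		"15:36:57.535935", "15:36:59.535224", "15:37:01.536002",
		"15:37:03.536383", "15:37:05.535940", "15:37:07.539086",
	}
	const layout = "15:04:05.000000"
	prev, _ := time.Parse(layout, stamps[0])
	for _, s := range stamps[1:] {
		t, _ := time.Parse(layout, s)
		fmt.Printf("%s -> %s: %v apart\n", prev.Format(layout), s, t.Sub(prev))
		prev = t
	}
}
```
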
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:37:07 crc kubenswrapper[4967]: E1121 15:37:07.539273 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:37:08 crc kubenswrapper[4967]: I1121 15:37:08.535721 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:37:08 crc kubenswrapper[4967]: I1121 15:37:08.535731 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:37:08 crc kubenswrapper[4967]: E1121 15:37:08.535879 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:37:08 crc kubenswrapper[4967]: E1121 15:37:08.535988 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:37:09 crc kubenswrapper[4967]: I1121 15:37:09.535775 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:37:09 crc kubenswrapper[4967]: I1121 15:37:09.535781 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:09 crc kubenswrapper[4967]: E1121 15:37:09.535938 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:37:09 crc kubenswrapper[4967]: E1121 15:37:09.536417 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:37:09 crc kubenswrapper[4967]: I1121 15:37:09.536701 4967 scope.go:117] "RemoveContainer" containerID="a8d45d2b6e1bb1f80ce967579185bd793f0c1dae1083720a39ae3ad3863ca14d" Nov 21 15:37:09 crc kubenswrapper[4967]: E1121 15:37:09.536938 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-zm492_openshift-ovn-kubernetes(eeb9277d-9a26-4665-a01c-9ed1c379e8dd)\"" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" Nov 21 15:37:10 crc kubenswrapper[4967]: I1121 15:37:10.536134 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:37:10 crc kubenswrapper[4967]: I1121 15:37:10.536157 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:37:10 crc kubenswrapper[4967]: E1121 15:37:10.536367 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:37:10 crc kubenswrapper[4967]: E1121 15:37:10.536577 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:37:11 crc kubenswrapper[4967]: I1121 15:37:11.177013 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j4dcx_629a5f41-3cd8-4518-a833-2832f4ebe55a/kube-multus/1.log" Nov 21 15:37:11 crc kubenswrapper[4967]: I1121 15:37:11.177918 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j4dcx_629a5f41-3cd8-4518-a833-2832f4ebe55a/kube-multus/0.log" Nov 21 15:37:11 crc kubenswrapper[4967]: I1121 15:37:11.178008 4967 generic.go:334] "Generic (PLEG): container finished" podID="629a5f41-3cd8-4518-a833-2832f4ebe55a" containerID="691bc6b6db50f421eb9e82f4a113bcef24c35943fe77db1a1c5635a24de9674e" exitCode=1 Nov 21 15:37:11 crc kubenswrapper[4967]: I1121 15:37:11.178072 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j4dcx" event={"ID":"629a5f41-3cd8-4518-a833-2832f4ebe55a","Type":"ContainerDied","Data":"691bc6b6db50f421eb9e82f4a113bcef24c35943fe77db1a1c5635a24de9674e"} Nov 21 15:37:11 crc kubenswrapper[4967]: I1121 15:37:11.178134 4967 scope.go:117] "RemoveContainer" containerID="3b15975ad348364e8aab25fb72cd298edfbfb6bd9df9d80ec45589c669bef751" Nov 21 15:37:11 crc kubenswrapper[4967]: I1121 15:37:11.179197 4967 scope.go:117] "RemoveContainer" containerID="691bc6b6db50f421eb9e82f4a113bcef24c35943fe77db1a1c5635a24de9674e" Nov 21 15:37:11 crc kubenswrapper[4967]: E1121 15:37:11.179623 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-j4dcx_openshift-multus(629a5f41-3cd8-4518-a833-2832f4ebe55a)\"" pod="openshift-multus/multus-j4dcx" podUID="629a5f41-3cd8-4518-a833-2832f4ebe55a" Nov 21 15:37:11 crc kubenswrapper[4967]: I1121 15:37:11.535592 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:37:11 crc kubenswrapper[4967]: I1121 15:37:11.535592 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:11 crc kubenswrapper[4967]: E1121 15:37:11.535791 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:37:11 crc kubenswrapper[4967]: E1121 15:37:11.535914 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:37:12 crc kubenswrapper[4967]: I1121 15:37:12.182254 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j4dcx_629a5f41-3cd8-4518-a833-2832f4ebe55a/kube-multus/1.log" Nov 21 15:37:12 crc kubenswrapper[4967]: E1121 15:37:12.479376 4967 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 21 15:37:12 crc kubenswrapper[4967]: I1121 15:37:12.536352 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:37:12 crc kubenswrapper[4967]: E1121 15:37:12.536459 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:37:12 crc kubenswrapper[4967]: I1121 15:37:12.537129 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:37:12 crc kubenswrapper[4967]: E1121 15:37:12.537188 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:37:12 crc kubenswrapper[4967]: E1121 15:37:12.839081 4967 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 21 15:37:13 crc kubenswrapper[4967]: I1121 15:37:13.535489 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:37:13 crc kubenswrapper[4967]: I1121 15:37:13.535559 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:13 crc kubenswrapper[4967]: E1121 15:37:13.535655 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:37:13 crc kubenswrapper[4967]: E1121 15:37:13.535798 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:37:14 crc kubenswrapper[4967]: I1121 15:37:14.535829 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:37:14 crc kubenswrapper[4967]: E1121 15:37:14.536014 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:37:14 crc kubenswrapper[4967]: I1121 15:37:14.536370 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:37:14 crc kubenswrapper[4967]: E1121 15:37:14.536450 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:37:15 crc kubenswrapper[4967]: I1121 15:37:15.535223 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:15 crc kubenswrapper[4967]: I1121 15:37:15.535261 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:37:15 crc kubenswrapper[4967]: E1121 15:37:15.535428 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:37:15 crc kubenswrapper[4967]: E1121 15:37:15.535550 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:37:16 crc kubenswrapper[4967]: I1121 15:37:16.535688 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:37:16 crc kubenswrapper[4967]: E1121 15:37:16.535884 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:37:16 crc kubenswrapper[4967]: I1121 15:37:16.536028 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:37:16 crc kubenswrapper[4967]: E1121 15:37:16.536214 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:37:17 crc kubenswrapper[4967]: I1121 15:37:17.535501 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:37:17 crc kubenswrapper[4967]: I1121 15:37:17.535520 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:17 crc kubenswrapper[4967]: E1121 15:37:17.535661 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:37:17 crc kubenswrapper[4967]: E1121 15:37:17.535800 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:37:17 crc kubenswrapper[4967]: E1121 15:37:17.841103 4967 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 21 15:37:18 crc kubenswrapper[4967]: I1121 15:37:18.535607 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:37:18 crc kubenswrapper[4967]: I1121 15:37:18.535621 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:37:18 crc kubenswrapper[4967]: E1121 15:37:18.536361 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:37:18 crc kubenswrapper[4967]: E1121 15:37:18.536554 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:37:19 crc kubenswrapper[4967]: I1121 15:37:19.535342 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:37:19 crc kubenswrapper[4967]: I1121 15:37:19.535374 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:19 crc kubenswrapper[4967]: E1121 15:37:19.535540 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:37:19 crc kubenswrapper[4967]: E1121 15:37:19.535659 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:37:20 crc kubenswrapper[4967]: I1121 15:37:20.535423 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:37:20 crc kubenswrapper[4967]: E1121 15:37:20.535593 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:37:20 crc kubenswrapper[4967]: I1121 15:37:20.535715 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:37:20 crc kubenswrapper[4967]: E1121 15:37:20.535885 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:37:21 crc kubenswrapper[4967]: I1121 15:37:21.535844 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:37:21 crc kubenswrapper[4967]: I1121 15:37:21.535894 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:21 crc kubenswrapper[4967]: E1121 15:37:21.536165 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:37:21 crc kubenswrapper[4967]: E1121 15:37:21.536389 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:37:22 crc kubenswrapper[4967]: I1121 15:37:22.535505 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:37:22 crc kubenswrapper[4967]: I1121 15:37:22.535505 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:37:22 crc kubenswrapper[4967]: E1121 15:37:22.536636 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:37:22 crc kubenswrapper[4967]: E1121 15:37:22.536824 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:37:22 crc kubenswrapper[4967]: E1121 15:37:22.841718 4967 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 21 15:37:23 crc kubenswrapper[4967]: I1121 15:37:23.535851 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:23 crc kubenswrapper[4967]: I1121 15:37:23.535976 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:37:23 crc kubenswrapper[4967]: E1121 15:37:23.536044 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:37:23 crc kubenswrapper[4967]: E1121 15:37:23.536264 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:37:23 crc kubenswrapper[4967]: I1121 15:37:23.537098 4967 scope.go:117] "RemoveContainer" containerID="a8d45d2b6e1bb1f80ce967579185bd793f0c1dae1083720a39ae3ad3863ca14d" Nov 21 15:37:24 crc kubenswrapper[4967]: I1121 15:37:24.225384 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovnkube-controller/3.log" Nov 21 15:37:24 crc kubenswrapper[4967]: I1121 15:37:24.229502 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerStarted","Data":"07508a326bea35e0f0e46f7eddb2426f11ab562e5693b62e4effc13bdfcc51f6"} Nov 21 15:37:24 crc kubenswrapper[4967]: I1121 15:37:24.230040 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:37:24 crc kubenswrapper[4967]: I1121 15:37:24.266200 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" podStartSLOduration=108.266175174 podStartE2EDuration="1m48.266175174s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:24.26568947 +0000 UTC m=+132.524210478" watchObservedRunningTime="2025-11-21 15:37:24.266175174 +0000 UTC m=+132.524696182" Nov 21 15:37:24 crc kubenswrapper[4967]: I1121 15:37:24.535368 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:37:24 crc kubenswrapper[4967]: I1121 15:37:24.535368 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:37:24 crc kubenswrapper[4967]: E1121 15:37:24.535562 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:37:24 crc kubenswrapper[4967]: E1121 15:37:24.535794 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:37:24 crc kubenswrapper[4967]: I1121 15:37:24.536228 4967 scope.go:117] "RemoveContainer" containerID="691bc6b6db50f421eb9e82f4a113bcef24c35943fe77db1a1c5635a24de9674e" Nov 21 15:37:24 crc kubenswrapper[4967]: I1121 15:37:24.687572 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-kj7qv"] Nov 21 15:37:25 crc kubenswrapper[4967]: I1121 15:37:25.237005 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j4dcx_629a5f41-3cd8-4518-a833-2832f4ebe55a/kube-multus/1.log" Nov 21 15:37:25 crc kubenswrapper[4967]: I1121 15:37:25.237545 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:37:25 crc kubenswrapper[4967]: I1121 15:37:25.237548 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j4dcx" event={"ID":"629a5f41-3cd8-4518-a833-2832f4ebe55a","Type":"ContainerStarted","Data":"3d70f335c8c42e0b38f593261c45810a3f14e8876d93fdc04908ec56a235c11b"} Nov 21 15:37:25 crc kubenswrapper[4967]: E1121 15:37:25.237706 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:37:25 crc kubenswrapper[4967]: I1121 15:37:25.535852 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:25 crc kubenswrapper[4967]: I1121 15:37:25.535894 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:37:25 crc kubenswrapper[4967]: E1121 15:37:25.536083 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:37:25 crc kubenswrapper[4967]: E1121 15:37:25.536203 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:37:26 crc kubenswrapper[4967]: I1121 15:37:26.536358 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:37:26 crc kubenswrapper[4967]: E1121 15:37:26.536579 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 15:37:26 crc kubenswrapper[4967]: I1121 15:37:26.536714 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:37:26 crc kubenswrapper[4967]: E1121 15:37:26.536999 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-kj7qv" podUID="e413228d-eaa3-45fb-8adf-35e0054bf53c" Nov 21 15:37:27 crc kubenswrapper[4967]: I1121 15:37:27.535247 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:27 crc kubenswrapper[4967]: I1121 15:37:27.535248 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:37:27 crc kubenswrapper[4967]: E1121 15:37:27.535529 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 15:37:27 crc kubenswrapper[4967]: E1121 15:37:27.535653 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 15:37:28 crc kubenswrapper[4967]: I1121 15:37:28.536041 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:37:28 crc kubenswrapper[4967]: I1121 15:37:28.537124 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:37:28 crc kubenswrapper[4967]: I1121 15:37:28.538533 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 21 15:37:28 crc kubenswrapper[4967]: I1121 15:37:28.538694 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 21 15:37:28 crc kubenswrapper[4967]: I1121 15:37:28.540856 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 21 15:37:28 crc kubenswrapper[4967]: I1121 15:37:28.541117 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 21 15:37:29 crc kubenswrapper[4967]: I1121 15:37:29.535133 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:37:29 crc kubenswrapper[4967]: I1121 15:37:29.535133 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:29 crc kubenswrapper[4967]: I1121 15:37:29.537605 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 21 15:37:29 crc kubenswrapper[4967]: I1121 15:37:29.539539 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.173887 4967 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.215388 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-w6tpt"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.216600 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-dd6s7"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.217267 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4wxnm"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.217416 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-w6tpt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.218893 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dd6s7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.220489 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-m45jq"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.237579 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.239994 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.241168 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.241176 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.241369 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.241634 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.242746 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.242951 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-km26f"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.243038 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.249352 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.249396 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.249618 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.250599 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.250783 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.250932 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.250990 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.251595 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.252097 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.252691 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-882pz"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.253101 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.253423 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.253488 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-882pz" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.253488 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.253705 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.253887 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.254034 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.256379 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gljzt"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.256766 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gljzt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.256986 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-x7hlr"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.257286 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-x7hlr" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.257819 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-tdgx7"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.258112 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.258362 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-tdgx7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.262059 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.262591 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-lb8zd"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.262798 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.263012 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-zcvrf"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.264465 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.266906 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c5fls"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.267055 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.267472 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.267848 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4wxnm"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.267901 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c5fls" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.268097 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.272789 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-882pz"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.272848 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-x7hlr"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.274477 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-zcvrf"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.275716 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-w6tpt"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.275977 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.276274 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.276727 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.276924 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.277134 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.277342 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-console/console-f9d7485db-m45jq"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.277453 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.277775 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.277968 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.278539 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.282271 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.282335 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gljzt"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.282351 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.282436 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.282665 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.282728 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.294990 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.295501 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.296042 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.298637 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.298711 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.298966 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.299406 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c5fls"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.299478 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-tdgx7"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.301125 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.304152 4967 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.305115 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.308503 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-km26f"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.313954 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.314268 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.314486 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.314539 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.314693 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.314980 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.315178 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.315420 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.316003 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.316248 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.316591 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.316894 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.318609 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.334870 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.335050 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.335141 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 21 15:37:30 
crc kubenswrapper[4967]: I1121 15:37:30.335223 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.335600 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.335757 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.335917 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.336245 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.336745 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.337036 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.337399 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.337563 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.337582 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.337811 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.338042 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.338199 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.338301 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.338817 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.338963 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.339080 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.339387 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.339562 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 21 
15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.339597 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.339705 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.339748 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.339836 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.340784 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/51f4aa6d-cb8f-4b69-a8f8-61fd52bf3c1d-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-w6tpt\" (UID: \"51f4aa6d-cb8f-4b69-a8f8-61fd52bf3c1d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-w6tpt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.340832 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/52d8fd4a-44ee-47d6-8f16-2b41728f7f1c-machine-approver-tls\") pod \"machine-approver-56656f9798-dd6s7\" (UID: \"52d8fd4a-44ee-47d6-8f16-2b41728f7f1c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dd6s7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.340854 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1def5571-fff5-47d8-b9bd-13ee21c73760-client-ca\") pod \"controller-manager-879f6c89f-4wxnm\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.340877 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcxpf\" (UniqueName: \"kubernetes.io/projected/52d8fd4a-44ee-47d6-8f16-2b41728f7f1c-kube-api-access-rcxpf\") pod \"machine-approver-56656f9798-dd6s7\" (UID: \"52d8fd4a-44ee-47d6-8f16-2b41728f7f1c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dd6s7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.340903 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/52d8fd4a-44ee-47d6-8f16-2b41728f7f1c-auth-proxy-config\") pod \"machine-approver-56656f9798-dd6s7\" (UID: \"52d8fd4a-44ee-47d6-8f16-2b41728f7f1c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dd6s7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.340937 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8bzl\" (UniqueName: \"kubernetes.io/projected/1def5571-fff5-47d8-b9bd-13ee21c73760-kube-api-access-j8bzl\") pod \"controller-manager-879f6c89f-4wxnm\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.340956 4967 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwjq7\" (UniqueName: \"kubernetes.io/projected/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-kube-api-access-xwjq7\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.340980 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1def5571-fff5-47d8-b9bd-13ee21c73760-serving-cert\") pod \"controller-manager-879f6c89f-4wxnm\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.341000 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-console-serving-cert\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.341019 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-service-ca\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.341043 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-console-oauth-config\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.341064 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-oauth-serving-cert\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.341082 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjjp5\" (UniqueName: \"kubernetes.io/projected/51f4aa6d-cb8f-4b69-a8f8-61fd52bf3c1d-kube-api-access-bjjp5\") pod \"cluster-samples-operator-665b6dd947-w6tpt\" (UID: \"51f4aa6d-cb8f-4b69-a8f8-61fd52bf3c1d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-w6tpt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.341111 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-trusted-ca-bundle\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.341127 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/52d8fd4a-44ee-47d6-8f16-2b41728f7f1c-config\") pod \"machine-approver-56656f9798-dd6s7\" (UID: \"52d8fd4a-44ee-47d6-8f16-2b41728f7f1c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dd6s7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.341144 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1def5571-fff5-47d8-b9bd-13ee21c73760-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-4wxnm\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.341162 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1def5571-fff5-47d8-b9bd-13ee21c73760-config\") pod \"controller-manager-879f6c89f-4wxnm\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.341179 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-console-config\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.343956 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.344718 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.344938 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.346921 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.347246 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.347447 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.348527 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.348566 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.348573 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.348673 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.348738 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-lb8zd"] Nov 21 15:37:30 
crc kubenswrapper[4967]: I1121 15:37:30.348783 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.348821 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.349179 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.349219 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.348365 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.349416 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.349439 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.348825 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.350012 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.350796 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.351221 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-q2z9m"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.353836 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.359008 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.359802 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.360912 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-6g4w8"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.361546 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-6g4w8" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.361889 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-q2z9m" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.362345 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.364396 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.364800 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.364861 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.365545 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.379427 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-sxsl5"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.381046 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.387681 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.390368 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.390729 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.392812 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.393073 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sxsl5" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.393915 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-gwx9x"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.394830 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.399464 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-xdn6j"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.399836 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-gwx9x" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.400687 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-szs8c"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.400947 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-xdn6j" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.404388 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-szs8c" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.405641 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.407859 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mv55f"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.411551 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.411734 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.413692 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.415248 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-l6k28"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.416026 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-l6k28" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.416747 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kls8w"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.417356 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.418077 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.419516 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8r5qd"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.421116 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.421811 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8r5qd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.422743 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.422942 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-92chf"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.423641 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-92chf" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.424631 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-j66mb"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.425494 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-j66mb" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.426243 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.426772 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.427644 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.428077 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.428995 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tz287"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.429911 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tz287" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.430982 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ql9tj"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.432462 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-dmk5z"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.432594 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.434926 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-dmk5z" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.439868 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.439073 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-hlspg"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.442852 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-audit-policies\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.442995 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j65nh\" (UniqueName: \"kubernetes.io/projected/28189f52-065e-4d6f-b959-6a052477f10f-kube-api-access-j65nh\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443062 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwjq7\" (UniqueName: \"kubernetes.io/projected/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-kube-api-access-xwjq7\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443110 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1def5571-fff5-47d8-b9bd-13ee21c73760-serving-cert\") pod \"controller-manager-879f6c89f-4wxnm\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443147 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-console-serving-cert\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443184 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aba5ef1a-6979-4625-af79-f51e8970c06a-config\") pod \"authentication-operator-69f744f599-zcvrf\" (UID: \"aba5ef1a-6979-4625-af79-f51e8970c06a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443220 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvn6s\" (UniqueName: \"kubernetes.io/projected/aba5ef1a-6979-4625-af79-f51e8970c06a-kube-api-access-bvn6s\") pod \"authentication-operator-69f744f599-zcvrf\" (UID: \"aba5ef1a-6979-4625-af79-f51e8970c06a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443283 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/28189f52-065e-4d6f-b959-6a052477f10f-trusted-ca-bundle\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443372 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/944c0231-2382-4fc2-9e88-e83b473045f8-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443409 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/28189f52-065e-4d6f-b959-6a052477f10f-etcd-serving-ca\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443449 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/70fb4095-863d-445d-bc3a-bdb264c4abc1-images\") pod \"machine-api-operator-5694c8668f-882pz\" (UID: \"70fb4095-863d-445d-bc3a-bdb264c4abc1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-882pz" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443491 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443532 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443578 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmvzd\" (UniqueName: \"kubernetes.io/projected/91fcf3a6-63c0-41f9-a864-95f15bc9bbe6-kube-api-access-xmvzd\") pod \"olm-operator-6b444d44fb-k6mnj\" (UID: \"91fcf3a6-63c0-41f9-a864-95f15bc9bbe6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443621 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443646 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" 
(UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-service-ca\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443665 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443680 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aba5ef1a-6979-4625-af79-f51e8970c06a-service-ca-bundle\") pod \"authentication-operator-69f744f599-zcvrf\" (UID: \"aba5ef1a-6979-4625-af79-f51e8970c06a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443697 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a02f9e5c-5484-4406-89c3-f7803420a47e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-gljzt\" (UID: \"a02f9e5c-5484-4406-89c3-f7803420a47e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gljzt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443716 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vb2fv\" (UniqueName: \"kubernetes.io/projected/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-kube-api-access-vb2fv\") pod \"route-controller-manager-6576b87f9c-v9g6l\" (UID: \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443743 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-console-oauth-config\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443762 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3874fcd1-fa6e-4b2c-b9e3-ce42c0275521-serving-cert\") pod \"openshift-config-operator-7777fb866f-9pz5w\" (UID: \"3874fcd1-fa6e-4b2c-b9e3-ce42c0275521\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443780 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443799 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"audit-policies\" (UniqueName: \"kubernetes.io/configmap/944c0231-2382-4fc2-9e88-e83b473045f8-audit-policies\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443815 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/944c0231-2382-4fc2-9e88-e83b473045f8-etcd-client\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443836 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/28189f52-065e-4d6f-b959-6a052477f10f-image-import-ca\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443852 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/28189f52-065e-4d6f-b959-6a052477f10f-encryption-config\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443873 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-config\") pod \"route-controller-manager-6576b87f9c-v9g6l\" (UID: \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443893 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443916 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6cz4\" (UniqueName: \"kubernetes.io/projected/3874fcd1-fa6e-4b2c-b9e3-ce42c0275521-kube-api-access-f6cz4\") pod \"openshift-config-operator-7777fb866f-9pz5w\" (UID: \"3874fcd1-fa6e-4b2c-b9e3-ce42c0275521\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443934 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1b88a76-dfa5-4273-a7ab-6c0824308b04-config\") pod \"console-operator-58897d9998-x7hlr\" (UID: \"e1b88a76-dfa5-4273-a7ab-6c0824308b04\") " pod="openshift-console-operator/console-operator-58897d9998-x7hlr" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443963 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-oauth-serving-cert\") pod 
\"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.443984 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/79cf3908-cb05-42a4-afd4-6e529e7d586d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-q4dqq\" (UID: \"79cf3908-cb05-42a4-afd4-6e529e7d586d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444008 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444024 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/396d79a1-4427-49b2-b16e-89fb27df71ec-audit-dir\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444042 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/79cf3908-cb05-42a4-afd4-6e529e7d586d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-q4dqq\" (UID: \"79cf3908-cb05-42a4-afd4-6e529e7d586d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444058 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/91fcf3a6-63c0-41f9-a864-95f15bc9bbe6-srv-cert\") pod \"olm-operator-6b444d44fb-k6mnj\" (UID: \"91fcf3a6-63c0-41f9-a864-95f15bc9bbe6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444073 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444088 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-client-ca\") pod \"route-controller-manager-6576b87f9c-v9g6l\" (UID: \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444110 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjjp5\" (UniqueName: \"kubernetes.io/projected/51f4aa6d-cb8f-4b69-a8f8-61fd52bf3c1d-kube-api-access-bjjp5\") pod 
\"cluster-samples-operator-665b6dd947-w6tpt\" (UID: \"51f4aa6d-cb8f-4b69-a8f8-61fd52bf3c1d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-w6tpt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444126 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aba5ef1a-6979-4625-af79-f51e8970c06a-serving-cert\") pod \"authentication-operator-69f744f599-zcvrf\" (UID: \"aba5ef1a-6979-4625-af79-f51e8970c06a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444142 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkqw7\" (UniqueName: \"kubernetes.io/projected/5253a6fb-0e1a-4441-9a5a-24682a098f45-kube-api-access-kkqw7\") pod \"openshift-controller-manager-operator-756b6f6bc6-c5fls\" (UID: \"5253a6fb-0e1a-4441-9a5a-24682a098f45\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c5fls" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444160 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28189f52-065e-4d6f-b959-6a052477f10f-config\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444180 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444197 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-trusted-ca-bundle\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444216 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52d8fd4a-44ee-47d6-8f16-2b41728f7f1c-config\") pod \"machine-approver-56656f9798-dd6s7\" (UID: \"52d8fd4a-44ee-47d6-8f16-2b41728f7f1c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dd6s7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444236 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/91fcf3a6-63c0-41f9-a864-95f15bc9bbe6-profile-collector-cert\") pod \"olm-operator-6b444d44fb-k6mnj\" (UID: \"91fcf3a6-63c0-41f9-a864-95f15bc9bbe6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444250 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/28189f52-065e-4d6f-b959-6a052477f10f-etcd-client\") pod \"apiserver-76f77b778f-km26f\" (UID: 
\"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444268 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-serving-cert\") pod \"route-controller-manager-6576b87f9c-v9g6l\" (UID: \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444285 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5253a6fb-0e1a-4441-9a5a-24682a098f45-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-c5fls\" (UID: \"5253a6fb-0e1a-4441-9a5a-24682a098f45\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c5fls" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444301 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5f5wd\" (UniqueName: \"kubernetes.io/projected/4c12eeb4-d087-4e18-a9b0-0a2211a6128d-kube-api-access-5f5wd\") pod \"downloads-7954f5f757-tdgx7\" (UID: \"4c12eeb4-d087-4e18-a9b0-0a2211a6128d\") " pod="openshift-console/downloads-7954f5f757-tdgx7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444337 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1def5571-fff5-47d8-b9bd-13ee21c73760-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-4wxnm\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444353 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70fb4095-863d-445d-bc3a-bdb264c4abc1-config\") pod \"machine-api-operator-5694c8668f-882pz\" (UID: \"70fb4095-863d-445d-bc3a-bdb264c4abc1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-882pz" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444371 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/70fb4095-863d-445d-bc3a-bdb264c4abc1-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-882pz\" (UID: \"70fb4095-863d-445d-bc3a-bdb264c4abc1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-882pz" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444390 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5253a6fb-0e1a-4441-9a5a-24682a098f45-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-c5fls\" (UID: \"5253a6fb-0e1a-4441-9a5a-24682a098f45\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c5fls" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444406 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/944c0231-2382-4fc2-9e88-e83b473045f8-audit-dir\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: 
\"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444427 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1def5571-fff5-47d8-b9bd-13ee21c73760-config\") pod \"controller-manager-879f6c89f-4wxnm\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444476 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-console-config\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444494 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/79cf3908-cb05-42a4-afd4-6e529e7d586d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-q4dqq\" (UID: \"79cf3908-cb05-42a4-afd4-6e529e7d586d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444516 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nllg\" (UniqueName: \"kubernetes.io/projected/70fb4095-863d-445d-bc3a-bdb264c4abc1-kube-api-access-5nllg\") pod \"machine-api-operator-5694c8668f-882pz\" (UID: \"70fb4095-863d-445d-bc3a-bdb264c4abc1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-882pz" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444533 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e1b88a76-dfa5-4273-a7ab-6c0824308b04-trusted-ca\") pod \"console-operator-58897d9998-x7hlr\" (UID: \"e1b88a76-dfa5-4273-a7ab-6c0824308b04\") " pod="openshift-console-operator/console-operator-58897d9998-x7hlr" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444551 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/28189f52-065e-4d6f-b959-6a052477f10f-audit\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444572 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/28189f52-065e-4d6f-b959-6a052477f10f-serving-cert\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444588 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc 
kubenswrapper[4967]: I1121 15:37:30.444608 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/3874fcd1-fa6e-4b2c-b9e3-ce42c0275521-available-featuregates\") pod \"openshift-config-operator-7777fb866f-9pz5w\" (UID: \"3874fcd1-fa6e-4b2c-b9e3-ce42c0275521\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444629 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tkxt4\" (UniqueName: \"kubernetes.io/projected/a02f9e5c-5484-4406-89c3-f7803420a47e-kube-api-access-tkxt4\") pod \"openshift-apiserver-operator-796bbdcf4f-gljzt\" (UID: \"a02f9e5c-5484-4406-89c3-f7803420a47e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gljzt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444648 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/944c0231-2382-4fc2-9e88-e83b473045f8-encryption-config\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444671 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcf2z\" (UniqueName: \"kubernetes.io/projected/396d79a1-4427-49b2-b16e-89fb27df71ec-kube-api-access-mcf2z\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444702 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/51f4aa6d-cb8f-4b69-a8f8-61fd52bf3c1d-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-w6tpt\" (UID: \"51f4aa6d-cb8f-4b69-a8f8-61fd52bf3c1d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-w6tpt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444721 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/52d8fd4a-44ee-47d6-8f16-2b41728f7f1c-machine-approver-tls\") pod \"machine-approver-56656f9798-dd6s7\" (UID: \"52d8fd4a-44ee-47d6-8f16-2b41728f7f1c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dd6s7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444738 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a02f9e5c-5484-4406-89c3-f7803420a47e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-gljzt\" (UID: \"a02f9e5c-5484-4406-89c3-f7803420a47e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gljzt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444754 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/944c0231-2382-4fc2-9e88-e83b473045f8-serving-cert\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 
crc kubenswrapper[4967]: I1121 15:37:30.444772 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcxpf\" (UniqueName: \"kubernetes.io/projected/52d8fd4a-44ee-47d6-8f16-2b41728f7f1c-kube-api-access-rcxpf\") pod \"machine-approver-56656f9798-dd6s7\" (UID: \"52d8fd4a-44ee-47d6-8f16-2b41728f7f1c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dd6s7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444789 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1def5571-fff5-47d8-b9bd-13ee21c73760-client-ca\") pod \"controller-manager-879f6c89f-4wxnm\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444822 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/28189f52-065e-4d6f-b959-6a052477f10f-audit-dir\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444948 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/52d8fd4a-44ee-47d6-8f16-2b41728f7f1c-auth-proxy-config\") pod \"machine-approver-56656f9798-dd6s7\" (UID: \"52d8fd4a-44ee-47d6-8f16-2b41728f7f1c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dd6s7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444970 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsxrn\" (UniqueName: \"kubernetes.io/projected/79cf3908-cb05-42a4-afd4-6e529e7d586d-kube-api-access-gsxrn\") pod \"cluster-image-registry-operator-dc59b4c8b-q4dqq\" (UID: \"79cf3908-cb05-42a4-afd4-6e529e7d586d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.444989 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbfjz\" (UniqueName: \"kubernetes.io/projected/e1b88a76-dfa5-4273-a7ab-6c0824308b04-kube-api-access-fbfjz\") pod \"console-operator-58897d9998-x7hlr\" (UID: \"e1b88a76-dfa5-4273-a7ab-6c0824308b04\") " pod="openshift-console-operator/console-operator-58897d9998-x7hlr" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.445013 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e1b88a76-dfa5-4273-a7ab-6c0824308b04-serving-cert\") pod \"console-operator-58897d9998-x7hlr\" (UID: \"e1b88a76-dfa5-4273-a7ab-6c0824308b04\") " pod="openshift-console-operator/console-operator-58897d9998-x7hlr" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.445087 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aba5ef1a-6979-4625-af79-f51e8970c06a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-zcvrf\" (UID: \"aba5ef1a-6979-4625-af79-f51e8970c06a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.445645 4967 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.445687 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mv55f"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.445710 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-q2z9m"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.446029 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-hlspg" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.446495 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-service-ca\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.447048 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1def5571-fff5-47d8-b9bd-13ee21c73760-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-4wxnm\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.447405 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1def5571-fff5-47d8-b9bd-13ee21c73760-client-ca\") pod \"controller-manager-879f6c89f-4wxnm\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.447482 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52d8fd4a-44ee-47d6-8f16-2b41728f7f1c-config\") pod \"machine-approver-56656f9798-dd6s7\" (UID: \"52d8fd4a-44ee-47d6-8f16-2b41728f7f1c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dd6s7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.447706 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/28189f52-065e-4d6f-b959-6a052477f10f-node-pullsecrets\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.447789 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/944c0231-2382-4fc2-9e88-e83b473045f8-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.447844 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8bzl\" (UniqueName: \"kubernetes.io/projected/1def5571-fff5-47d8-b9bd-13ee21c73760-kube-api-access-j8bzl\") pod \"controller-manager-879f6c89f-4wxnm\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" 
Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.447868 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.447890 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lb5zz\" (UniqueName: \"kubernetes.io/projected/944c0231-2382-4fc2-9e88-e83b473045f8-kube-api-access-lb5zz\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.450674 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-sxsl5"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.452440 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-trusted-ca-bundle\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.452800 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-zl28t"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.452990 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-console-config\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.453487 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1def5571-fff5-47d8-b9bd-13ee21c73760-config\") pod \"controller-manager-879f6c89f-4wxnm\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.454642 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-zl28t" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.455150 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/52d8fd4a-44ee-47d6-8f16-2b41728f7f1c-machine-approver-tls\") pod \"machine-approver-56656f9798-dd6s7\" (UID: \"52d8fd4a-44ee-47d6-8f16-2b41728f7f1c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dd6s7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.457149 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1def5571-fff5-47d8-b9bd-13ee21c73760-serving-cert\") pod \"controller-manager-879f6c89f-4wxnm\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.459083 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-oauth-serving-cert\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.459820 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/52d8fd4a-44ee-47d6-8f16-2b41728f7f1c-auth-proxy-config\") pod \"machine-approver-56656f9798-dd6s7\" (UID: \"52d8fd4a-44ee-47d6-8f16-2b41728f7f1c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dd6s7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.460007 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.461975 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-xdn6j"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.462250 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-console-oauth-config\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.468115 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-szs8c"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.470136 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.471934 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-dgdjr"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.472806 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-dgdjr" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.477416 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-hlspg"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.477468 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-j66mb"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.477484 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-l6k28"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.477559 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-console-serving-cert\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.478440 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/51f4aa6d-cb8f-4b69-a8f8-61fd52bf3c1d-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-w6tpt\" (UID: \"51f4aa6d-cb8f-4b69-a8f8-61fd52bf3c1d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-w6tpt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.480272 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-6g4w8"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.480503 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.481545 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kls8w"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.483221 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-92chf"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.485213 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8r5qd"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.489848 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.492400 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.499922 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.500869 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.502769 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-dgdjr"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.504998 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-service-ca-operator/service-ca-operator-777779d784-zl28t"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.514986 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-dmk5z"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.518733 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tz287"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.520322 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.520347 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.521742 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ql9tj"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.522895 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-x2cf9"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.525444 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-g9gvx"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.525988 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-x2cf9"] Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.526104 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-g9gvx" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.525066 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.540277 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.548450 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsxrn\" (UniqueName: \"kubernetes.io/projected/79cf3908-cb05-42a4-afd4-6e529e7d586d-kube-api-access-gsxrn\") pod \"cluster-image-registry-operator-dc59b4c8b-q4dqq\" (UID: \"79cf3908-cb05-42a4-afd4-6e529e7d586d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.548599 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbfjz\" (UniqueName: \"kubernetes.io/projected/e1b88a76-dfa5-4273-a7ab-6c0824308b04-kube-api-access-fbfjz\") pod \"console-operator-58897d9998-x7hlr\" (UID: \"e1b88a76-dfa5-4273-a7ab-6c0824308b04\") " pod="openshift-console-operator/console-operator-58897d9998-x7hlr" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.548691 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e-serving-cert\") pod \"etcd-operator-b45778765-mv55f\" (UID: \"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.548770 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e1b88a76-dfa5-4273-a7ab-6c0824308b04-serving-cert\") pod \"console-operator-58897d9998-x7hlr\" (UID: \"e1b88a76-dfa5-4273-a7ab-6c0824308b04\") " pod="openshift-console-operator/console-operator-58897d9998-x7hlr" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.548845 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aba5ef1a-6979-4625-af79-f51e8970c06a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-zcvrf\" (UID: \"aba5ef1a-6979-4625-af79-f51e8970c06a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.548939 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/28189f52-065e-4d6f-b959-6a052477f10f-node-pullsecrets\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.549064 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/28189f52-065e-4d6f-b959-6a052477f10f-node-pullsecrets\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.549135 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/944c0231-2382-4fc2-9e88-e83b473045f8-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: 
\"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.549247 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szqsl\" (UniqueName: \"kubernetes.io/projected/67ec7435-1c30-438c-8da5-8231ab6cf336-kube-api-access-szqsl\") pod \"catalog-operator-68c6474976-2bzth\" (UID: \"67ec7435-1c30-438c-8da5-8231ab6cf336\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.549437 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.549529 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lb5zz\" (UniqueName: \"kubernetes.io/projected/944c0231-2382-4fc2-9e88-e83b473045f8-kube-api-access-lb5zz\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.549616 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4lw4\" (UniqueName: \"kubernetes.io/projected/a515cddd-29b9-4ced-afe4-cb52ead0fa58-kube-api-access-s4lw4\") pod \"machine-config-controller-84d6567774-sxsl5\" (UID: \"a515cddd-29b9-4ced-afe4-cb52ead0fa58\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sxsl5" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.549714 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e-config\") pod \"etcd-operator-b45778765-mv55f\" (UID: \"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.549716 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/944c0231-2382-4fc2-9e88-e83b473045f8-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.549822 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-audit-policies\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.549959 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j65nh\" (UniqueName: \"kubernetes.io/projected/28189f52-065e-4d6f-b959-6a052477f10f-kube-api-access-j65nh\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc 
kubenswrapper[4967]: I1121 15:37:30.550002 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/67ec7435-1c30-438c-8da5-8231ab6cf336-profile-collector-cert\") pod \"catalog-operator-68c6474976-2bzth\" (UID: \"67ec7435-1c30-438c-8da5-8231ab6cf336\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550052 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aba5ef1a-6979-4625-af79-f51e8970c06a-config\") pod \"authentication-operator-69f744f599-zcvrf\" (UID: \"aba5ef1a-6979-4625-af79-f51e8970c06a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550077 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvn6s\" (UniqueName: \"kubernetes.io/projected/aba5ef1a-6979-4625-af79-f51e8970c06a-kube-api-access-bvn6s\") pod \"authentication-operator-69f744f599-zcvrf\" (UID: \"aba5ef1a-6979-4625-af79-f51e8970c06a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550112 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/28189f52-065e-4d6f-b959-6a052477f10f-trusted-ca-bundle\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550129 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/944c0231-2382-4fc2-9e88-e83b473045f8-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550147 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/28189f52-065e-4d6f-b959-6a052477f10f-etcd-serving-ca\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550165 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a515cddd-29b9-4ced-afe4-cb52ead0fa58-proxy-tls\") pod \"machine-config-controller-84d6567774-sxsl5\" (UID: \"a515cddd-29b9-4ced-afe4-cb52ead0fa58\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sxsl5" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550184 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqgpn\" (UniqueName: \"kubernetes.io/projected/52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e-kube-api-access-pqgpn\") pod \"etcd-operator-b45778765-mv55f\" (UID: \"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550208 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" 
(UniqueName: \"kubernetes.io/configmap/70fb4095-863d-445d-bc3a-bdb264c4abc1-images\") pod \"machine-api-operator-5694c8668f-882pz\" (UID: \"70fb4095-863d-445d-bc3a-bdb264c4abc1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-882pz" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550226 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550226 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aba5ef1a-6979-4625-af79-f51e8970c06a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-zcvrf\" (UID: \"aba5ef1a-6979-4625-af79-f51e8970c06a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550246 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550274 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmvzd\" (UniqueName: \"kubernetes.io/projected/91fcf3a6-63c0-41f9-a864-95f15bc9bbe6-kube-api-access-xmvzd\") pod \"olm-operator-6b444d44fb-k6mnj\" (UID: \"91fcf3a6-63c0-41f9-a864-95f15bc9bbe6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550295 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550330 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/67ec7435-1c30-438c-8da5-8231ab6cf336-srv-cert\") pod \"catalog-operator-68c6474976-2bzth\" (UID: \"67ec7435-1c30-438c-8da5-8231ab6cf336\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550354 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mxwsn\" (UniqueName: \"kubernetes.io/projected/659073fe-e665-4953-98b7-fe8e6ac5e075-kube-api-access-mxwsn\") pod \"packageserver-d55dfcdfc-2bjhd\" (UID: \"659073fe-e665-4953-98b7-fe8e6ac5e075\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550375 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: 
\"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550391 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aba5ef1a-6979-4625-af79-f51e8970c06a-service-ca-bundle\") pod \"authentication-operator-69f744f599-zcvrf\" (UID: \"aba5ef1a-6979-4625-af79-f51e8970c06a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550414 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a02f9e5c-5484-4406-89c3-f7803420a47e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-gljzt\" (UID: \"a02f9e5c-5484-4406-89c3-f7803420a47e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gljzt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550433 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vb2fv\" (UniqueName: \"kubernetes.io/projected/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-kube-api-access-vb2fv\") pod \"route-controller-manager-6576b87f9c-v9g6l\" (UID: \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550455 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15dd7d8c-fd82-4a6b-a749-8182841d9db9-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-l6k28\" (UID: \"15dd7d8c-fd82-4a6b-a749-8182841d9db9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-l6k28" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550478 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3874fcd1-fa6e-4b2c-b9e3-ce42c0275521-serving-cert\") pod \"openshift-config-operator-7777fb866f-9pz5w\" (UID: \"3874fcd1-fa6e-4b2c-b9e3-ce42c0275521\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550496 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550516 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/944c0231-2382-4fc2-9e88-e83b473045f8-audit-policies\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550533 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/944c0231-2382-4fc2-9e88-e83b473045f8-etcd-client\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550553 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67563d08-de10-4aac-bfdd-248379b85548-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-xdn6j\" (UID: \"67563d08-de10-4aac-bfdd-248379b85548\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-xdn6j" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550573 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/28189f52-065e-4d6f-b959-6a052477f10f-image-import-ca\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550591 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/28189f52-065e-4d6f-b959-6a052477f10f-encryption-config\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550614 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-config\") pod \"route-controller-manager-6576b87f9c-v9g6l\" (UID: \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550633 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/659073fe-e665-4953-98b7-fe8e6ac5e075-apiservice-cert\") pod \"packageserver-d55dfcdfc-2bjhd\" (UID: \"659073fe-e665-4953-98b7-fe8e6ac5e075\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550651 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7n7xm\" (UniqueName: \"kubernetes.io/projected/228df940-1987-44bb-a72e-944bafc22d91-kube-api-access-7n7xm\") pod \"migrator-59844c95c7-q2z9m\" (UID: \"228df940-1987-44bb-a72e-944bafc22d91\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-q2z9m" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550673 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550691 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/659073fe-e665-4953-98b7-fe8e6ac5e075-webhook-cert\") pod \"packageserver-d55dfcdfc-2bjhd\" (UID: 
\"659073fe-e665-4953-98b7-fe8e6ac5e075\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550714 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6cz4\" (UniqueName: \"kubernetes.io/projected/3874fcd1-fa6e-4b2c-b9e3-ce42c0275521-kube-api-access-f6cz4\") pod \"openshift-config-operator-7777fb866f-9pz5w\" (UID: \"3874fcd1-fa6e-4b2c-b9e3-ce42c0275521\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550730 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1b88a76-dfa5-4273-a7ab-6c0824308b04-config\") pod \"console-operator-58897d9998-x7hlr\" (UID: \"e1b88a76-dfa5-4273-a7ab-6c0824308b04\") " pod="openshift-console-operator/console-operator-58897d9998-x7hlr" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550748 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a515cddd-29b9-4ced-afe4-cb52ead0fa58-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-sxsl5\" (UID: \"a515cddd-29b9-4ced-afe4-cb52ead0fa58\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sxsl5" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550769 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-ql9tj\" (UID: \"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550791 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/79cf3908-cb05-42a4-afd4-6e529e7d586d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-q4dqq\" (UID: \"79cf3908-cb05-42a4-afd4-6e529e7d586d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550807 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-ql9tj\" (UID: \"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550826 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550844 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e-etcd-service-ca\") pod \"etcd-operator-b45778765-mv55f\" (UID: 
\"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550860 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e-etcd-client\") pod \"etcd-operator-b45778765-mv55f\" (UID: \"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550877 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/15dd7d8c-fd82-4a6b-a749-8182841d9db9-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-l6k28\" (UID: \"15dd7d8c-fd82-4a6b-a749-8182841d9db9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-l6k28" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550875 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aba5ef1a-6979-4625-af79-f51e8970c06a-config\") pod \"authentication-operator-69f744f599-zcvrf\" (UID: \"aba5ef1a-6979-4625-af79-f51e8970c06a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550895 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/396d79a1-4427-49b2-b16e-89fb27df71ec-audit-dir\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550948 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/396d79a1-4427-49b2-b16e-89fb27df71ec-audit-dir\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.550947 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/79cf3908-cb05-42a4-afd4-6e529e7d586d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-q4dqq\" (UID: \"79cf3908-cb05-42a4-afd4-6e529e7d586d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.551357 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/28189f52-065e-4d6f-b959-6a052477f10f-trusted-ca-bundle\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.551365 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/91fcf3a6-63c0-41f9-a864-95f15bc9bbe6-srv-cert\") pod \"olm-operator-6b444d44fb-k6mnj\" (UID: \"91fcf3a6-63c0-41f9-a864-95f15bc9bbe6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.551452 4967 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.551978 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552021 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-client-ca\") pod \"route-controller-manager-6576b87f9c-v9g6l\" (UID: \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552060 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cdjk\" (UniqueName: \"kubernetes.io/projected/dc8caa89-0bb5-4c35-9bbe-f1212a715f59-kube-api-access-7cdjk\") pod \"service-ca-9c57cc56f-hlspg\" (UID: \"dc8caa89-0bb5-4c35-9bbe-f1212a715f59\") " pod="openshift-service-ca/service-ca-9c57cc56f-hlspg" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552100 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aba5ef1a-6979-4625-af79-f51e8970c06a-serving-cert\") pod \"authentication-operator-69f744f599-zcvrf\" (UID: \"aba5ef1a-6979-4625-af79-f51e8970c06a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552134 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkqw7\" (UniqueName: \"kubernetes.io/projected/5253a6fb-0e1a-4441-9a5a-24682a098f45-kube-api-access-kkqw7\") pod \"openshift-controller-manager-operator-756b6f6bc6-c5fls\" (UID: \"5253a6fb-0e1a-4441-9a5a-24682a098f45\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c5fls" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552161 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28189f52-065e-4d6f-b959-6a052477f10f-config\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552384 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552417 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/91fcf3a6-63c0-41f9-a864-95f15bc9bbe6-profile-collector-cert\") pod \"olm-operator-6b444d44fb-k6mnj\" (UID: \"91fcf3a6-63c0-41f9-a864-95f15bc9bbe6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552436 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/28189f52-065e-4d6f-b959-6a052477f10f-etcd-client\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552454 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e-etcd-ca\") pod \"etcd-operator-b45778765-mv55f\" (UID: \"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552477 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-serving-cert\") pod \"route-controller-manager-6576b87f9c-v9g6l\" (UID: \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552497 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5253a6fb-0e1a-4441-9a5a-24682a098f45-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-c5fls\" (UID: \"5253a6fb-0e1a-4441-9a5a-24682a098f45\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c5fls" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552518 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5f5wd\" (UniqueName: \"kubernetes.io/projected/4c12eeb4-d087-4e18-a9b0-0a2211a6128d-kube-api-access-5f5wd\") pod \"downloads-7954f5f757-tdgx7\" (UID: \"4c12eeb4-d087-4e18-a9b0-0a2211a6128d\") " pod="openshift-console/downloads-7954f5f757-tdgx7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552540 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/67563d08-de10-4aac-bfdd-248379b85548-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-xdn6j\" (UID: \"67563d08-de10-4aac-bfdd-248379b85548\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-xdn6j" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552575 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70fb4095-863d-445d-bc3a-bdb264c4abc1-config\") pod \"machine-api-operator-5694c8668f-882pz\" (UID: \"70fb4095-863d-445d-bc3a-bdb264c4abc1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-882pz" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552605 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/70fb4095-863d-445d-bc3a-bdb264c4abc1-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-882pz\" 
(UID: \"70fb4095-863d-445d-bc3a-bdb264c4abc1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-882pz" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552629 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5253a6fb-0e1a-4441-9a5a-24682a098f45-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-c5fls\" (UID: \"5253a6fb-0e1a-4441-9a5a-24682a098f45\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c5fls" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552651 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/944c0231-2382-4fc2-9e88-e83b473045f8-audit-dir\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552674 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/659073fe-e665-4953-98b7-fe8e6ac5e075-tmpfs\") pod \"packageserver-d55dfcdfc-2bjhd\" (UID: \"659073fe-e665-4953-98b7-fe8e6ac5e075\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552766 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/944c0231-2382-4fc2-9e88-e83b473045f8-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552828 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a02f9e5c-5484-4406-89c3-f7803420a47e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-gljzt\" (UID: \"a02f9e5c-5484-4406-89c3-f7803420a47e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gljzt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.552905 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/dc8caa89-0bb5-4c35-9bbe-f1212a715f59-signing-key\") pod \"service-ca-9c57cc56f-hlspg\" (UID: \"dc8caa89-0bb5-4c35-9bbe-f1212a715f59\") " pod="openshift-service-ca/service-ca-9c57cc56f-hlspg" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.553083 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/79cf3908-cb05-42a4-afd4-6e529e7d586d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-q4dqq\" (UID: \"79cf3908-cb05-42a4-afd4-6e529e7d586d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.553392 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nllg\" (UniqueName: \"kubernetes.io/projected/70fb4095-863d-445d-bc3a-bdb264c4abc1-kube-api-access-5nllg\") pod \"machine-api-operator-5694c8668f-882pz\" (UID: \"70fb4095-863d-445d-bc3a-bdb264c4abc1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-882pz" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 
15:37:30.553423 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e1b88a76-dfa5-4273-a7ab-6c0824308b04-trusted-ca\") pod \"console-operator-58897d9998-x7hlr\" (UID: \"e1b88a76-dfa5-4273-a7ab-6c0824308b04\") " pod="openshift-console-operator/console-operator-58897d9998-x7hlr" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.553448 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/28189f52-065e-4d6f-b959-6a052477f10f-audit\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.553476 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/28189f52-065e-4d6f-b959-6a052477f10f-serving-cert\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.553494 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.553581 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/944c0231-2382-4fc2-9e88-e83b473045f8-audit-policies\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.553676 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tk2sx\" (UniqueName: \"kubernetes.io/projected/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-kube-api-access-tk2sx\") pod \"marketplace-operator-79b997595-ql9tj\" (UID: \"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.554102 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-audit-policies\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.554134 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/3874fcd1-fa6e-4b2c-b9e3-ce42c0275521-available-featuregates\") pod \"openshift-config-operator-7777fb866f-9pz5w\" (UID: \"3874fcd1-fa6e-4b2c-b9e3-ce42c0275521\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.554163 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tkxt4\" (UniqueName: \"kubernetes.io/projected/a02f9e5c-5484-4406-89c3-f7803420a47e-kube-api-access-tkxt4\") pod 
\"openshift-apiserver-operator-796bbdcf4f-gljzt\" (UID: \"a02f9e5c-5484-4406-89c3-f7803420a47e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gljzt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.554187 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/944c0231-2382-4fc2-9e88-e83b473045f8-encryption-config\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.554208 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcf2z\" (UniqueName: \"kubernetes.io/projected/396d79a1-4427-49b2-b16e-89fb27df71ec-kube-api-access-mcf2z\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.554563 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e1b88a76-dfa5-4273-a7ab-6c0824308b04-config\") pod \"console-operator-58897d9998-x7hlr\" (UID: \"e1b88a76-dfa5-4273-a7ab-6c0824308b04\") " pod="openshift-console-operator/console-operator-58897d9998-x7hlr" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.554798 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a02f9e5c-5484-4406-89c3-f7803420a47e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-gljzt\" (UID: \"a02f9e5c-5484-4406-89c3-f7803420a47e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gljzt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.554844 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/944c0231-2382-4fc2-9e88-e83b473045f8-serving-cert\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.554878 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67563d08-de10-4aac-bfdd-248379b85548-config\") pod \"kube-controller-manager-operator-78b949d7b-xdn6j\" (UID: \"67563d08-de10-4aac-bfdd-248379b85548\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-xdn6j" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.554947 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/15dd7d8c-fd82-4a6b-a749-8182841d9db9-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-l6k28\" (UID: \"15dd7d8c-fd82-4a6b-a749-8182841d9db9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-l6k28" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.554977 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/dc8caa89-0bb5-4c35-9bbe-f1212a715f59-signing-cabundle\") pod \"service-ca-9c57cc56f-hlspg\" (UID: \"dc8caa89-0bb5-4c35-9bbe-f1212a715f59\") " 
pod="openshift-service-ca/service-ca-9c57cc56f-hlspg" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.555005 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/28189f52-065e-4d6f-b959-6a052477f10f-audit-dir\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.555143 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/28189f52-065e-4d6f-b959-6a052477f10f-audit-dir\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.555190 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/70fb4095-863d-445d-bc3a-bdb264c4abc1-images\") pod \"machine-api-operator-5694c8668f-882pz\" (UID: \"70fb4095-863d-445d-bc3a-bdb264c4abc1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-882pz" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.555573 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aba5ef1a-6979-4625-af79-f51e8970c06a-service-ca-bundle\") pod \"authentication-operator-69f744f599-zcvrf\" (UID: \"aba5ef1a-6979-4625-af79-f51e8970c06a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.555656 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e1b88a76-dfa5-4273-a7ab-6c0824308b04-serving-cert\") pod \"console-operator-58897d9998-x7hlr\" (UID: \"e1b88a76-dfa5-4273-a7ab-6c0824308b04\") " pod="openshift-console-operator/console-operator-58897d9998-x7hlr" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.555801 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/28189f52-065e-4d6f-b959-6a052477f10f-image-import-ca\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.556956 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70fb4095-863d-445d-bc3a-bdb264c4abc1-config\") pod \"machine-api-operator-5694c8668f-882pz\" (UID: \"70fb4095-863d-445d-bc3a-bdb264c4abc1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-882pz" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.556982 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28189f52-065e-4d6f-b959-6a052477f10f-config\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.557289 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5253a6fb-0e1a-4441-9a5a-24682a098f45-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-c5fls\" (UID: \"5253a6fb-0e1a-4441-9a5a-24682a098f45\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c5fls" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.557668 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/944c0231-2382-4fc2-9e88-e83b473045f8-audit-dir\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.557988 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-client-ca\") pod \"route-controller-manager-6576b87f9c-v9g6l\" (UID: \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.558095 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/28189f52-065e-4d6f-b959-6a052477f10f-encryption-config\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.558637 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.558748 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/79cf3908-cb05-42a4-afd4-6e529e7d586d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-q4dqq\" (UID: \"79cf3908-cb05-42a4-afd4-6e529e7d586d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.559060 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e1b88a76-dfa5-4273-a7ab-6c0824308b04-trusted-ca\") pod \"console-operator-58897d9998-x7hlr\" (UID: \"e1b88a76-dfa5-4273-a7ab-6c0824308b04\") " pod="openshift-console-operator/console-operator-58897d9998-x7hlr" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.559102 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/3874fcd1-fa6e-4b2c-b9e3-ce42c0275521-available-featuregates\") pod \"openshift-config-operator-7777fb866f-9pz5w\" (UID: \"3874fcd1-fa6e-4b2c-b9e3-ce42c0275521\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.559521 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.559640 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/79cf3908-cb05-42a4-afd4-6e529e7d586d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-q4dqq\" (UID: \"79cf3908-cb05-42a4-afd4-6e529e7d586d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.559855 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.560110 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/28189f52-065e-4d6f-b959-6a052477f10f-etcd-serving-ca\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.560128 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/944c0231-2382-4fc2-9e88-e83b473045f8-etcd-client\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.560179 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aba5ef1a-6979-4625-af79-f51e8970c06a-serving-cert\") pod \"authentication-operator-69f744f599-zcvrf\" (UID: \"aba5ef1a-6979-4625-af79-f51e8970c06a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.560377 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.560509 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/28189f52-065e-4d6f-b959-6a052477f10f-audit\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.560733 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-config\") pod \"route-controller-manager-6576b87f9c-v9g6l\" (UID: \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.560921 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.561097 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/3874fcd1-fa6e-4b2c-b9e3-ce42c0275521-serving-cert\") pod \"openshift-config-operator-7777fb866f-9pz5w\" (UID: \"3874fcd1-fa6e-4b2c-b9e3-ce42c0275521\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.561110 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.561242 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.561784 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.561825 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.562421 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/944c0231-2382-4fc2-9e88-e83b473045f8-serving-cert\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.562425 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.562494 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-serving-cert\") pod \"route-controller-manager-6576b87f9c-v9g6l\" (UID: \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.562605 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/28189f52-065e-4d6f-b959-6a052477f10f-serving-cert\") pod \"apiserver-76f77b778f-km26f\" (UID: 
\"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.563358 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.564006 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5253a6fb-0e1a-4441-9a5a-24682a098f45-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-c5fls\" (UID: \"5253a6fb-0e1a-4441-9a5a-24682a098f45\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c5fls" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.564873 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/91fcf3a6-63c0-41f9-a864-95f15bc9bbe6-profile-collector-cert\") pod \"olm-operator-6b444d44fb-k6mnj\" (UID: \"91fcf3a6-63c0-41f9-a864-95f15bc9bbe6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.565116 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/91fcf3a6-63c0-41f9-a864-95f15bc9bbe6-srv-cert\") pod \"olm-operator-6b444d44fb-k6mnj\" (UID: \"91fcf3a6-63c0-41f9-a864-95f15bc9bbe6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.565144 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/28189f52-065e-4d6f-b959-6a052477f10f-etcd-client\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.565571 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a02f9e5c-5484-4406-89c3-f7803420a47e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-gljzt\" (UID: \"a02f9e5c-5484-4406-89c3-f7803420a47e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gljzt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.565671 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/70fb4095-863d-445d-bc3a-bdb264c4abc1-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-882pz\" (UID: \"70fb4095-863d-445d-bc3a-bdb264c4abc1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-882pz" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.567616 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/944c0231-2382-4fc2-9e88-e83b473045f8-encryption-config\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.580956 4967 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.600366 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.639862 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.656516 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4lw4\" (UniqueName: \"kubernetes.io/projected/a515cddd-29b9-4ced-afe4-cb52ead0fa58-kube-api-access-s4lw4\") pod \"machine-config-controller-84d6567774-sxsl5\" (UID: \"a515cddd-29b9-4ced-afe4-cb52ead0fa58\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sxsl5" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.656723 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e-config\") pod \"etcd-operator-b45778765-mv55f\" (UID: \"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.656863 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/67ec7435-1c30-438c-8da5-8231ab6cf336-profile-collector-cert\") pod \"catalog-operator-68c6474976-2bzth\" (UID: \"67ec7435-1c30-438c-8da5-8231ab6cf336\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.656975 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a515cddd-29b9-4ced-afe4-cb52ead0fa58-proxy-tls\") pod \"machine-config-controller-84d6567774-sxsl5\" (UID: \"a515cddd-29b9-4ced-afe4-cb52ead0fa58\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sxsl5" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.657048 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqgpn\" (UniqueName: \"kubernetes.io/projected/52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e-kube-api-access-pqgpn\") pod \"etcd-operator-b45778765-mv55f\" (UID: \"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.657123 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/67ec7435-1c30-438c-8da5-8231ab6cf336-srv-cert\") pod \"catalog-operator-68c6474976-2bzth\" (UID: \"67ec7435-1c30-438c-8da5-8231ab6cf336\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.657208 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mxwsn\" (UniqueName: \"kubernetes.io/projected/659073fe-e665-4953-98b7-fe8e6ac5e075-kube-api-access-mxwsn\") pod \"packageserver-d55dfcdfc-2bjhd\" (UID: \"659073fe-e665-4953-98b7-fe8e6ac5e075\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.657352 4967 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15dd7d8c-fd82-4a6b-a749-8182841d9db9-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-l6k28\" (UID: \"15dd7d8c-fd82-4a6b-a749-8182841d9db9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-l6k28" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.657466 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67563d08-de10-4aac-bfdd-248379b85548-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-xdn6j\" (UID: \"67563d08-de10-4aac-bfdd-248379b85548\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-xdn6j" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.657582 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/659073fe-e665-4953-98b7-fe8e6ac5e075-apiservice-cert\") pod \"packageserver-d55dfcdfc-2bjhd\" (UID: \"659073fe-e665-4953-98b7-fe8e6ac5e075\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.657683 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7n7xm\" (UniqueName: \"kubernetes.io/projected/228df940-1987-44bb-a72e-944bafc22d91-kube-api-access-7n7xm\") pod \"migrator-59844c95c7-q2z9m\" (UID: \"228df940-1987-44bb-a72e-944bafc22d91\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-q2z9m" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.657797 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/659073fe-e665-4953-98b7-fe8e6ac5e075-webhook-cert\") pod \"packageserver-d55dfcdfc-2bjhd\" (UID: \"659073fe-e665-4953-98b7-fe8e6ac5e075\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.657920 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a515cddd-29b9-4ced-afe4-cb52ead0fa58-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-sxsl5\" (UID: \"a515cddd-29b9-4ced-afe4-cb52ead0fa58\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sxsl5" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.658031 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-ql9tj\" (UID: \"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.658148 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-ql9tj\" (UID: \"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.658347 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" 
(UniqueName: \"kubernetes.io/configmap/52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e-etcd-service-ca\") pod \"etcd-operator-b45778765-mv55f\" (UID: \"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.658534 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e-etcd-client\") pod \"etcd-operator-b45778765-mv55f\" (UID: \"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.658692 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/15dd7d8c-fd82-4a6b-a749-8182841d9db9-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-l6k28\" (UID: \"15dd7d8c-fd82-4a6b-a749-8182841d9db9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-l6k28" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.658771 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a515cddd-29b9-4ced-afe4-cb52ead0fa58-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-sxsl5\" (UID: \"a515cddd-29b9-4ced-afe4-cb52ead0fa58\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sxsl5" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.658864 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cdjk\" (UniqueName: \"kubernetes.io/projected/dc8caa89-0bb5-4c35-9bbe-f1212a715f59-kube-api-access-7cdjk\") pod \"service-ca-9c57cc56f-hlspg\" (UID: \"dc8caa89-0bb5-4c35-9bbe-f1212a715f59\") " pod="openshift-service-ca/service-ca-9c57cc56f-hlspg" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.658960 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e-etcd-ca\") pod \"etcd-operator-b45778765-mv55f\" (UID: \"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.659006 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/67563d08-de10-4aac-bfdd-248379b85548-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-xdn6j\" (UID: \"67563d08-de10-4aac-bfdd-248379b85548\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-xdn6j" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.659026 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/659073fe-e665-4953-98b7-fe8e6ac5e075-tmpfs\") pod \"packageserver-d55dfcdfc-2bjhd\" (UID: \"659073fe-e665-4953-98b7-fe8e6ac5e075\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.659053 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/dc8caa89-0bb5-4c35-9bbe-f1212a715f59-signing-key\") pod \"service-ca-9c57cc56f-hlspg\" (UID: \"dc8caa89-0bb5-4c35-9bbe-f1212a715f59\") " 
pod="openshift-service-ca/service-ca-9c57cc56f-hlspg" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.659095 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tk2sx\" (UniqueName: \"kubernetes.io/projected/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-kube-api-access-tk2sx\") pod \"marketplace-operator-79b997595-ql9tj\" (UID: \"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.659165 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67563d08-de10-4aac-bfdd-248379b85548-config\") pod \"kube-controller-manager-operator-78b949d7b-xdn6j\" (UID: \"67563d08-de10-4aac-bfdd-248379b85548\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-xdn6j" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.659207 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/15dd7d8c-fd82-4a6b-a749-8182841d9db9-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-l6k28\" (UID: \"15dd7d8c-fd82-4a6b-a749-8182841d9db9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-l6k28" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.659225 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/dc8caa89-0bb5-4c35-9bbe-f1212a715f59-signing-cabundle\") pod \"service-ca-9c57cc56f-hlspg\" (UID: \"dc8caa89-0bb5-4c35-9bbe-f1212a715f59\") " pod="openshift-service-ca/service-ca-9c57cc56f-hlspg" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.659260 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e-serving-cert\") pod \"etcd-operator-b45778765-mv55f\" (UID: \"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.659300 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szqsl\" (UniqueName: \"kubernetes.io/projected/67ec7435-1c30-438c-8da5-8231ab6cf336-kube-api-access-szqsl\") pod \"catalog-operator-68c6474976-2bzth\" (UID: \"67ec7435-1c30-438c-8da5-8231ab6cf336\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.659530 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.659985 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/659073fe-e665-4953-98b7-fe8e6ac5e075-tmpfs\") pod \"packageserver-d55dfcdfc-2bjhd\" (UID: \"659073fe-e665-4953-98b7-fe8e6ac5e075\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.660744 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/67ec7435-1c30-438c-8da5-8231ab6cf336-profile-collector-cert\") pod \"catalog-operator-68c6474976-2bzth\" (UID: 
\"67ec7435-1c30-438c-8da5-8231ab6cf336\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.662030 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a515cddd-29b9-4ced-afe4-cb52ead0fa58-proxy-tls\") pod \"machine-config-controller-84d6567774-sxsl5\" (UID: \"a515cddd-29b9-4ced-afe4-cb52ead0fa58\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sxsl5" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.680407 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.700122 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.720518 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.740802 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.759882 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.780606 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.799788 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.821652 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.841037 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.860509 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.880463 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.890865 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67563d08-de10-4aac-bfdd-248379b85548-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-xdn6j\" (UID: \"67563d08-de10-4aac-bfdd-248379b85548\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-xdn6j" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.900800 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.911076 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67563d08-de10-4aac-bfdd-248379b85548-config\") pod \"kube-controller-manager-operator-78b949d7b-xdn6j\" 
(UID: \"67563d08-de10-4aac-bfdd-248379b85548\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-xdn6j" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.920739 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.939469 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.959981 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 21 15:37:30 crc kubenswrapper[4967]: I1121 15:37:30.980061 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.000934 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.019798 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.032768 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e-etcd-client\") pod \"etcd-operator-b45778765-mv55f\" (UID: \"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.040961 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.059951 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.072744 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e-serving-cert\") pod \"etcd-operator-b45778765-mv55f\" (UID: \"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.080923 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.100115 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.110167 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e-etcd-ca\") pod \"etcd-operator-b45778765-mv55f\" (UID: \"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.120198 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.128495 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e-config\") pod \"etcd-operator-b45778765-mv55f\" (UID: \"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.146122 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.149601 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e-etcd-service-ca\") pod \"etcd-operator-b45778765-mv55f\" (UID: \"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.159800 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.179784 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.200215 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.219855 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.248397 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.259618 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.280144 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.301035 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.312672 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/15dd7d8c-fd82-4a6b-a749-8182841d9db9-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-l6k28\" (UID: \"15dd7d8c-fd82-4a6b-a749-8182841d9db9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-l6k28" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.320032 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.329128 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/15dd7d8c-fd82-4a6b-a749-8182841d9db9-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-l6k28\" (UID: \"15dd7d8c-fd82-4a6b-a749-8182841d9db9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-l6k28" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.340241 4967 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-image-registry"/"installation-pull-secrets" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.360012 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.379966 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.399806 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.410482 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/67ec7435-1c30-438c-8da5-8231ab6cf336-srv-cert\") pod \"catalog-operator-68c6474976-2bzth\" (UID: \"67ec7435-1c30-438c-8da5-8231ab6cf336\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.420337 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.438154 4967 request.go:700] Waited for 1.014290163s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-storage-version-migrator-operator/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0 Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.440385 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.460903 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.480515 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.499919 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.519672 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.540503 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.560937 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.580490 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.600663 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.619979 4967 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.633233 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/659073fe-e665-4953-98b7-fe8e6ac5e075-apiservice-cert\") pod \"packageserver-d55dfcdfc-2bjhd\" (UID: \"659073fe-e665-4953-98b7-fe8e6ac5e075\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.635183 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/659073fe-e665-4953-98b7-fe8e6ac5e075-webhook-cert\") pod \"packageserver-d55dfcdfc-2bjhd\" (UID: \"659073fe-e665-4953-98b7-fe8e6ac5e075\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd" Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.640942 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 15:37:31 crc kubenswrapper[4967]: E1121 15:37:31.659132 4967 secret.go:188] Couldn't get secret openshift-marketplace/marketplace-operator-metrics: failed to sync secret cache: timed out waiting for the condition Nov 21 15:37:31 crc kubenswrapper[4967]: E1121 15:37:31.659226 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-marketplace-operator-metrics podName:4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7 nodeName:}" failed. No retries permitted until 2025-11-21 15:37:32.159204669 +0000 UTC m=+140.417725677 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "marketplace-operator-metrics" (UniqueName: "kubernetes.io/secret/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-marketplace-operator-metrics") pod "marketplace-operator-79b997595-ql9tj" (UID: "4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7") : failed to sync secret cache: timed out waiting for the condition Nov 21 15:37:31 crc kubenswrapper[4967]: E1121 15:37:31.659232 4967 configmap.go:193] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: failed to sync configmap cache: timed out waiting for the condition Nov 21 15:37:31 crc kubenswrapper[4967]: E1121 15:37:31.659274 4967 secret.go:188] Couldn't get secret openshift-service-ca/signing-key: failed to sync secret cache: timed out waiting for the condition Nov 21 15:37:31 crc kubenswrapper[4967]: E1121 15:37:31.659363 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dc8caa89-0bb5-4c35-9bbe-f1212a715f59-signing-key podName:dc8caa89-0bb5-4c35-9bbe-f1212a715f59 nodeName:}" failed. No retries permitted until 2025-11-21 15:37:32.159334763 +0000 UTC m=+140.417855811 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/dc8caa89-0bb5-4c35-9bbe-f1212a715f59-signing-key") pod "service-ca-9c57cc56f-hlspg" (UID: "dc8caa89-0bb5-4c35-9bbe-f1212a715f59") : failed to sync secret cache: timed out waiting for the condition Nov 21 15:37:31 crc kubenswrapper[4967]: E1121 15:37:31.659387 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-marketplace-trusted-ca podName:4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7 nodeName:}" failed. No retries permitted until 2025-11-21 15:37:32.159378034 +0000 UTC m=+140.417899132 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-marketplace-trusted-ca") pod "marketplace-operator-79b997595-ql9tj" (UID: "4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7") : failed to sync configmap cache: timed out waiting for the condition
Nov 21 15:37:31 crc kubenswrapper[4967]: E1121 15:37:31.659473 4967 configmap.go:193] Couldn't get configMap openshift-service-ca/signing-cabundle: failed to sync configmap cache: timed out waiting for the condition
Nov 21 15:37:31 crc kubenswrapper[4967]: E1121 15:37:31.659504 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/dc8caa89-0bb5-4c35-9bbe-f1212a715f59-signing-cabundle podName:dc8caa89-0bb5-4c35-9bbe-f1212a715f59 nodeName:}" failed. No retries permitted until 2025-11-21 15:37:32.159496067 +0000 UTC m=+140.418017155 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/dc8caa89-0bb5-4c35-9bbe-f1212a715f59-signing-cabundle") pod "service-ca-9c57cc56f-hlspg" (UID: "dc8caa89-0bb5-4c35-9bbe-f1212a715f59") : failed to sync configmap cache: timed out waiting for the condition
Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.659954 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.680530 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.700409 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.719824 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.745813 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.759769 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.779587 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.799858 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.819239 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.840571 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.859508 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.894547 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwjq7\" (UniqueName: \"kubernetes.io/projected/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-kube-api-access-xwjq7\") pod \"console-f9d7485db-m45jq\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " pod="openshift-console/console-f9d7485db-m45jq"
Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.911903 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-m45jq"
Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.915997 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjjp5\" (UniqueName: \"kubernetes.io/projected/51f4aa6d-cb8f-4b69-a8f8-61fd52bf3c1d-kube-api-access-bjjp5\") pod \"cluster-samples-operator-665b6dd947-w6tpt\" (UID: \"51f4aa6d-cb8f-4b69-a8f8-61fd52bf3c1d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-w6tpt"
Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.934918 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcxpf\" (UniqueName: \"kubernetes.io/projected/52d8fd4a-44ee-47d6-8f16-2b41728f7f1c-kube-api-access-rcxpf\") pod \"machine-approver-56656f9798-dd6s7\" (UID: \"52d8fd4a-44ee-47d6-8f16-2b41728f7f1c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dd6s7"
Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.940516 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.960234 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Nov 21 15:37:31 crc kubenswrapper[4967]: I1121 15:37:31.980763 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.000430 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.020119 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.058651 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8bzl\" (UniqueName: \"kubernetes.io/projected/1def5571-fff5-47d8-b9bd-13ee21c73760-kube-api-access-j8bzl\") pod \"controller-manager-879f6c89f-4wxnm\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.079970 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.083062 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-w6tpt"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.100351 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.103637 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dd6s7"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.119464 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.121026 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.140606 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.160224 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.180568 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.185512 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-ql9tj\" (UID: \"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.185576 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-ql9tj\" (UID: \"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.185693 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/dc8caa89-0bb5-4c35-9bbe-f1212a715f59-signing-key\") pod \"service-ca-9c57cc56f-hlspg\" (UID: \"dc8caa89-0bb5-4c35-9bbe-f1212a715f59\") " pod="openshift-service-ca/service-ca-9c57cc56f-hlspg"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.186350 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/dc8caa89-0bb5-4c35-9bbe-f1212a715f59-signing-cabundle\") pod \"service-ca-9c57cc56f-hlspg\" (UID: \"dc8caa89-0bb5-4c35-9bbe-f1212a715f59\") " pod="openshift-service-ca/service-ca-9c57cc56f-hlspg"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.187247 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/dc8caa89-0bb5-4c35-9bbe-f1212a715f59-signing-cabundle\") pod \"service-ca-9c57cc56f-hlspg\" (UID: \"dc8caa89-0bb5-4c35-9bbe-f1212a715f59\") " pod="openshift-service-ca/service-ca-9c57cc56f-hlspg"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.187379 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-ql9tj\" (UID: \"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.190298 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-ql9tj\" (UID: \"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.190644 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/dc8caa89-0bb5-4c35-9bbe-f1212a715f59-signing-key\") pod \"service-ca-9c57cc56f-hlspg\" (UID: \"dc8caa89-0bb5-4c35-9bbe-f1212a715f59\") " pod="openshift-service-ca/service-ca-9c57cc56f-hlspg"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.202160 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.220838 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.240978 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.260417 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.269331 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-w6tpt"]
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.283102 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.288082 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dd6s7" event={"ID":"52d8fd4a-44ee-47d6-8f16-2b41728f7f1c","Type":"ContainerStarted","Data":"beb76fff34df156bace7714206b48672d52e1d611cb84064fcf6963b076e2fe3"}
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.289592 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-m45jq"]
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.300367 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.308194 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4wxnm"]
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.320797 4967 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Nov 21 15:37:32 crc kubenswrapper[4967]: W1121 15:37:32.331527 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1def5571_fff5_47d8_b9bd_13ee21c73760.slice/crio-68e789e782defb92a21d8a3484d33dbefe1ab20515554d088d63ec798724e58b WatchSource:0}: Error finding container 68e789e782defb92a21d8a3484d33dbefe1ab20515554d088d63ec798724e58b: Status 404 returned error can't find the container with id 68e789e782defb92a21d8a3484d33dbefe1ab20515554d088d63ec798724e58b
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.341149 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.360056 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.402495 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsxrn\" (UniqueName: \"kubernetes.io/projected/79cf3908-cb05-42a4-afd4-6e529e7d586d-kube-api-access-gsxrn\") pod \"cluster-image-registry-operator-dc59b4c8b-q4dqq\" (UID: \"79cf3908-cb05-42a4-afd4-6e529e7d586d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.417990 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbfjz\" (UniqueName: \"kubernetes.io/projected/e1b88a76-dfa5-4273-a7ab-6c0824308b04-kube-api-access-fbfjz\") pod \"console-operator-58897d9998-x7hlr\" (UID: \"e1b88a76-dfa5-4273-a7ab-6c0824308b04\") " pod="openshift-console-operator/console-operator-58897d9998-x7hlr"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.436752 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lb5zz\" (UniqueName: \"kubernetes.io/projected/944c0231-2382-4fc2-9e88-e83b473045f8-kube-api-access-lb5zz\") pod \"apiserver-7bbb656c7d-nkdl7\" (UID: \"944c0231-2382-4fc2-9e88-e83b473045f8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.439054 4967 request.go:700] Waited for 1.888902858s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-apiserver/serviceaccounts/openshift-apiserver-sa/token
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.456791 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j65nh\" (UniqueName: \"kubernetes.io/projected/28189f52-065e-4d6f-b959-6a052477f10f-kube-api-access-j65nh\") pod \"apiserver-76f77b778f-km26f\" (UID: \"28189f52-065e-4d6f-b959-6a052477f10f\") " pod="openshift-apiserver/apiserver-76f77b778f-km26f"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.476845 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvn6s\" (UniqueName: \"kubernetes.io/projected/aba5ef1a-6979-4625-af79-f51e8970c06a-kube-api-access-bvn6s\") pod \"authentication-operator-69f744f599-zcvrf\" (UID: \"aba5ef1a-6979-4625-af79-f51e8970c06a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.484337 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.497935 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/79cf3908-cb05-42a4-afd4-6e529e7d586d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-q4dqq\" (UID: \"79cf3908-cb05-42a4-afd4-6e529e7d586d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.516374 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmvzd\" (UniqueName: \"kubernetes.io/projected/91fcf3a6-63c0-41f9-a864-95f15bc9bbe6-kube-api-access-xmvzd\") pod \"olm-operator-6b444d44fb-k6mnj\" (UID: \"91fcf3a6-63c0-41f9-a864-95f15bc9bbe6\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.541342 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5f5wd\" (UniqueName: \"kubernetes.io/projected/4c12eeb4-d087-4e18-a9b0-0a2211a6128d-kube-api-access-5f5wd\") pod \"downloads-7954f5f757-tdgx7\" (UID: \"4c12eeb4-d087-4e18-a9b0-0a2211a6128d\") " pod="openshift-console/downloads-7954f5f757-tdgx7"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.562944 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6cz4\" (UniqueName: \"kubernetes.io/projected/3874fcd1-fa6e-4b2c-b9e3-ce42c0275521-kube-api-access-f6cz4\") pod \"openshift-config-operator-7777fb866f-9pz5w\" (UID: \"3874fcd1-fa6e-4b2c-b9e3-ce42c0275521\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.579753 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-x7hlr"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.584148 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vb2fv\" (UniqueName: \"kubernetes.io/projected/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-kube-api-access-vb2fv\") pod \"route-controller-manager-6576b87f9c-v9g6l\" (UID: \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.593161 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-tdgx7"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.598009 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcf2z\" (UniqueName: \"kubernetes.io/projected/396d79a1-4427-49b2-b16e-89fb27df71ec-kube-api-access-mcf2z\") pod \"oauth-openshift-558db77b4-lb8zd\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.610109 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.619553 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.619575 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nllg\" (UniqueName: \"kubernetes.io/projected/70fb4095-863d-445d-bc3a-bdb264c4abc1-kube-api-access-5nllg\") pod \"machine-api-operator-5694c8668f-882pz\" (UID: \"70fb4095-863d-445d-bc3a-bdb264c4abc1\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-882pz"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.641849 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.642552 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tkxt4\" (UniqueName: \"kubernetes.io/projected/a02f9e5c-5484-4406-89c3-f7803420a47e-kube-api-access-tkxt4\") pod \"openshift-apiserver-operator-796bbdcf4f-gljzt\" (UID: \"a02f9e5c-5484-4406-89c3-f7803420a47e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gljzt"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.649317 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.662112 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkqw7\" (UniqueName: \"kubernetes.io/projected/5253a6fb-0e1a-4441-9a5a-24682a098f45-kube-api-access-kkqw7\") pod \"openshift-controller-manager-operator-756b6f6bc6-c5fls\" (UID: \"5253a6fb-0e1a-4441-9a5a-24682a098f45\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c5fls"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.697529 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.699938 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4lw4\" (UniqueName: \"kubernetes.io/projected/a515cddd-29b9-4ced-afe4-cb52ead0fa58-kube-api-access-s4lw4\") pod \"machine-config-controller-84d6567774-sxsl5\" (UID: \"a515cddd-29b9-4ced-afe4-cb52ead0fa58\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sxsl5"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.730046 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sxsl5"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.737125 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mxwsn\" (UniqueName: \"kubernetes.io/projected/659073fe-e665-4953-98b7-fe8e6ac5e075-kube-api-access-mxwsn\") pod \"packageserver-d55dfcdfc-2bjhd\" (UID: \"659073fe-e665-4953-98b7-fe8e6ac5e075\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.750921 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-km26f"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.754089 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqgpn\" (UniqueName: \"kubernetes.io/projected/52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e-kube-api-access-pqgpn\") pod \"etcd-operator-b45778765-mv55f\" (UID: \"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.773374 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7n7xm\" (UniqueName: \"kubernetes.io/projected/228df940-1987-44bb-a72e-944bafc22d91-kube-api-access-7n7xm\") pod \"migrator-59844c95c7-q2z9m\" (UID: \"228df940-1987-44bb-a72e-944bafc22d91\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-q2z9m"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.782449 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/15dd7d8c-fd82-4a6b-a749-8182841d9db9-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-l6k28\" (UID: \"15dd7d8c-fd82-4a6b-a749-8182841d9db9\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-l6k28"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.787086 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.801945 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tk2sx\" (UniqueName: \"kubernetes.io/projected/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-kube-api-access-tk2sx\") pod \"marketplace-operator-79b997595-ql9tj\" (UID: \"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7\") " pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.803435 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-q2z9m"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.809651 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-l6k28"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.822287 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szqsl\" (UniqueName: \"kubernetes.io/projected/67ec7435-1c30-438c-8da5-8231ab6cf336-kube-api-access-szqsl\") pod \"catalog-operator-68c6474976-2bzth\" (UID: \"67ec7435-1c30-438c-8da5-8231ab6cf336\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.824394 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.827503 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.832466 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-882pz"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.840999 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gljzt"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.846573 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cdjk\" (UniqueName: \"kubernetes.io/projected/dc8caa89-0bb5-4c35-9bbe-f1212a715f59-kube-api-access-7cdjk\") pod \"service-ca-9c57cc56f-hlspg\" (UID: \"dc8caa89-0bb5-4c35-9bbe-f1212a715f59\") " pod="openshift-service-ca/service-ca-9c57cc56f-hlspg"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.862293 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/67563d08-de10-4aac-bfdd-248379b85548-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-xdn6j\" (UID: \"67563d08-de10-4aac-bfdd-248379b85548\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-xdn6j"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.865658 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-tdgx7"]
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.868599 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.893802 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.900400 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/22aa627b-e824-4f54-8ee9-e9db1e7b7da3-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-92chf\" (UID: \"22aa627b-e824-4f54-8ee9-e9db1e7b7da3\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-92chf"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.900445 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82dlq\" (UniqueName: \"kubernetes.io/projected/274aead1-3e11-4349-99be-32e19bfe7d78-kube-api-access-82dlq\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.900489 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d-metrics-certs\") pod \"router-default-5444994796-gwx9x\" (UID: \"d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d\") " pod="openshift-ingress/router-default-5444994796-gwx9x"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.900689 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ef9d0064-2b4f-416e-8300-8bdf07e2bd61-metrics-tls\") pod \"dns-default-dmk5z\" (UID: \"ef9d0064-2b4f-416e-8300-8bdf07e2bd61\") " pod="openshift-dns/dns-default-dmk5z"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.901584 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-x7hlr"]
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.901732 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d-stats-auth\") pod \"router-default-5444994796-gwx9x\" (UID: \"d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d\") " pod="openshift-ingress/router-default-5444994796-gwx9x"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.901758 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b2ae3907-0438-4add-bdcf-0045ae419f0a-metrics-tls\") pod \"dns-operator-744455d44c-j66mb\" (UID: \"b2ae3907-0438-4add-bdcf-0045ae419f0a\") " pod="openshift-dns-operator/dns-operator-744455d44c-j66mb"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.901839 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mx58s\" (UniqueName: \"kubernetes.io/projected/66e9814a-2c50-4b11-9412-39e1fd445bc6-kube-api-access-mx58s\") pod \"package-server-manager-789f6589d5-8r5qd\" (UID: \"66e9814a-2c50-4b11-9412-39e1fd445bc6\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8r5qd"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.901881 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4rmp\" (UniqueName: \"kubernetes.io/projected/d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d-kube-api-access-c4rmp\") pod \"router-default-5444994796-gwx9x\" (UID: \"d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d\") " pod="openshift-ingress/router-default-5444994796-gwx9x"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.901908 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4l4f\" (UniqueName: \"kubernetes.io/projected/22aa627b-e824-4f54-8ee9-e9db1e7b7da3-kube-api-access-s4l4f\") pod \"kube-storage-version-migrator-operator-b67b599dd-92chf\" (UID: \"22aa627b-e824-4f54-8ee9-e9db1e7b7da3\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-92chf"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.901932 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c191759b-e8ae-49d4-b2a1-b7acb2a54709-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-szs8c\" (UID: \"c191759b-e8ae-49d4-b2a1-b7acb2a54709\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-szs8c"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.902026 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/54732f6f-2547-48d2-bb98-2e90611b48bf-images\") pod \"machine-config-operator-74547568cd-t97cr\" (UID: \"54732f6f-2547-48d2-bb98-2e90611b48bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.902077 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.902419 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hn8k\" (UniqueName: \"kubernetes.io/projected/5c2198a6-561a-407b-979b-67d05acfb234-kube-api-access-9hn8k\") pod \"control-plane-machine-set-operator-78cbb6b69f-tz287\" (UID: \"5c2198a6-561a-407b-979b-67d05acfb234\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tz287"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.902452 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hgxn\" (UniqueName: \"kubernetes.io/projected/ef9d0064-2b4f-416e-8300-8bdf07e2bd61-kube-api-access-5hgxn\") pod \"dns-default-dmk5z\" (UID: \"ef9d0064-2b4f-416e-8300-8bdf07e2bd61\") " pod="openshift-dns/dns-default-dmk5z"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.902480 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrbxj\" (UniqueName: \"kubernetes.io/projected/6d1972c2-1c3e-4e33-b70d-52fe89843453-kube-api-access-mrbxj\") pod \"ingress-operator-5b745b69d9-w46k5\" (UID: \"6d1972c2-1c3e-4e33-b70d-52fe89843453\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.902508 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/274aead1-3e11-4349-99be-32e19bfe7d78-registry-certificates\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.902534 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/274aead1-3e11-4349-99be-32e19bfe7d78-bound-sa-token\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.902564 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szh2n\" (UniqueName: \"kubernetes.io/projected/569fa728-8c24-4618-9655-612ea2297aad-kube-api-access-szh2n\") pod \"multus-admission-controller-857f4d67dd-6g4w8\" (UID: \"569fa728-8c24-4618-9655-612ea2297aad\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6g4w8"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.902649 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/274aead1-3e11-4349-99be-32e19bfe7d78-ca-trust-extracted\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.902702 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d-default-certificate\") pod \"router-default-5444994796-gwx9x\" (UID: \"d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d\") " pod="openshift-ingress/router-default-5444994796-gwx9x"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.902724 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c191759b-e8ae-49d4-b2a1-b7acb2a54709-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-szs8c\" (UID: \"c191759b-e8ae-49d4-b2a1-b7acb2a54709\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-szs8c"
Nov 21 15:37:32 crc kubenswrapper[4967]: E1121 15:37:32.903082 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:33.403067538 +0000 UTC m=+141.661588546 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.905234 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/274aead1-3e11-4349-99be-32e19bfe7d78-installation-pull-secrets\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.905274 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ef9d0064-2b4f-416e-8300-8bdf07e2bd61-config-volume\") pod \"dns-default-dmk5z\" (UID: \"ef9d0064-2b4f-416e-8300-8bdf07e2bd61\") " pod="openshift-dns/dns-default-dmk5z"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.907567 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58762\" (UniqueName: \"kubernetes.io/projected/b2ae3907-0438-4add-bdcf-0045ae419f0a-kube-api-access-58762\") pod \"dns-operator-744455d44c-j66mb\" (UID: \"b2ae3907-0438-4add-bdcf-0045ae419f0a\") " pod="openshift-dns-operator/dns-operator-744455d44c-j66mb"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.908199 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6slv\" (UniqueName: \"kubernetes.io/projected/d72fe727-d902-4315-afb6-8a67d9df8c57-kube-api-access-v6slv\") pod \"collect-profiles-29395650-txjsl\" (UID: \"d72fe727-d902-4315-afb6-8a67d9df8c57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.908505 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/274aead1-3e11-4349-99be-32e19bfe7d78-trusted-ca\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.908565 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/5c2198a6-561a-407b-979b-67d05acfb234-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-tz287\" (UID: \"5c2198a6-561a-407b-979b-67d05acfb234\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tz287"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.909043 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d-service-ca-bundle\") pod \"router-default-5444994796-gwx9x\" (UID: \"d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d\") " pod="openshift-ingress/router-default-5444994796-gwx9x"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.909092 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6d1972c2-1c3e-4e33-b70d-52fe89843453-metrics-tls\") pod \"ingress-operator-5b745b69d9-w46k5\" (UID: \"6d1972c2-1c3e-4e33-b70d-52fe89843453\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.909119 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/54732f6f-2547-48d2-bb98-2e90611b48bf-auth-proxy-config\") pod \"machine-config-operator-74547568cd-t97cr\" (UID: \"54732f6f-2547-48d2-bb98-2e90611b48bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.909164 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jrhg\" (UniqueName: \"kubernetes.io/projected/54732f6f-2547-48d2-bb98-2e90611b48bf-kube-api-access-2jrhg\") pod \"machine-config-operator-74547568cd-t97cr\" (UID: \"54732f6f-2547-48d2-bb98-2e90611b48bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.910624 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/54732f6f-2547-48d2-bb98-2e90611b48bf-proxy-tls\") pod \"machine-config-operator-74547568cd-t97cr\" (UID: \"54732f6f-2547-48d2-bb98-2e90611b48bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.910703 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6d1972c2-1c3e-4e33-b70d-52fe89843453-trusted-ca\") pod \"ingress-operator-5b745b69d9-w46k5\" (UID: \"6d1972c2-1c3e-4e33-b70d-52fe89843453\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.911369 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c191759b-e8ae-49d4-b2a1-b7acb2a54709-config\") pod \"kube-apiserver-operator-766d6c64bb-szs8c\" (UID: \"c191759b-e8ae-49d4-b2a1-b7acb2a54709\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-szs8c"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.912207 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d72fe727-d902-4315-afb6-8a67d9df8c57-config-volume\") pod \"collect-profiles-29395650-txjsl\" (UID: \"d72fe727-d902-4315-afb6-8a67d9df8c57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.912390 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/569fa728-8c24-4618-9655-612ea2297aad-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-6g4w8\" (UID: \"569fa728-8c24-4618-9655-612ea2297aad\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6g4w8"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.912440 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22aa627b-e824-4f54-8ee9-e9db1e7b7da3-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-92chf\" (UID: \"22aa627b-e824-4f54-8ee9-e9db1e7b7da3\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-92chf"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.912465 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/66e9814a-2c50-4b11-9412-39e1fd445bc6-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-8r5qd\" (UID: \"66e9814a-2c50-4b11-9412-39e1fd445bc6\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8r5qd"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.912510 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/274aead1-3e11-4349-99be-32e19bfe7d78-registry-tls\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.912535 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d72fe727-d902-4315-afb6-8a67d9df8c57-secret-volume\") pod \"collect-profiles-29395650-txjsl\" (UID: \"d72fe727-d902-4315-afb6-8a67d9df8c57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.912610 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6d1972c2-1c3e-4e33-b70d-52fe89843453-bound-sa-token\") pod \"ingress-operator-5b745b69d9-w46k5\" (UID: \"6d1972c2-1c3e-4e33-b70d-52fe89843453\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.913123 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-hlspg"
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.913346 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7"]
Nov 21 15:37:32 crc kubenswrapper[4967]: I1121 15:37:32.933556 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c5fls"
Nov 21 15:37:32 crc kubenswrapper[4967]: W1121 15:37:32.936092 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4c12eeb4_d087_4e18_a9b0_0a2211a6128d.slice/crio-fc8b2e6c9a3ebc0c1e031228ca19132bcb8662438b8c017902fe63f90b438559 WatchSource:0}: Error finding container fc8b2e6c9a3ebc0c1e031228ca19132bcb8662438b8c017902fe63f90b438559: Status 404 returned error can't find the container with id fc8b2e6c9a3ebc0c1e031228ca19132bcb8662438b8c017902fe63f90b438559
Nov 21 15:37:32 crc kubenswrapper[4967]: W1121 15:37:32.945856 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode1b88a76_dfa5_4273_a7ab_6c0824308b04.slice/crio-eeb2b4908b2ceb5ea60f9dab75e4650d2529a4f17eab96703a03acf2dbe41133 WatchSource:0}: Error finding container eeb2b4908b2ceb5ea60f9dab75e4650d2529a4f17eab96703a03acf2dbe41133: Status 404 returned error can't find the container with id eeb2b4908b2ceb5ea60f9dab75e4650d2529a4f17eab96703a03acf2dbe41133
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.014778 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015000 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/54732f6f-2547-48d2-bb98-2e90611b48bf-proxy-tls\") pod \"machine-config-operator-74547568cd-t97cr\" (UID: \"54732f6f-2547-48d2-bb98-2e90611b48bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015039 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6d1972c2-1c3e-4e33-b70d-52fe89843453-trusted-ca\") pod \"ingress-operator-5b745b69d9-w46k5\" (UID: \"6d1972c2-1c3e-4e33-b70d-52fe89843453\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015070 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c191759b-e8ae-49d4-b2a1-b7acb2a54709-config\") pod \"kube-apiserver-operator-766d6c64bb-szs8c\" (UID: \"c191759b-e8ae-49d4-b2a1-b7acb2a54709\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-szs8c"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015103 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d72fe727-d902-4315-afb6-8a67d9df8c57-config-volume\") pod \"collect-profiles-29395650-txjsl\" (UID: \"d72fe727-d902-4315-afb6-8a67d9df8c57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015131 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2-mountpoint-dir\") pod \"csi-hostpathplugin-x2cf9\" (UID: \"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2\") " pod="hostpath-provisioner/csi-hostpathplugin-x2cf9"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015147 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2-registration-dir\") pod \"csi-hostpathplugin-x2cf9\" (UID: \"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2\") " pod="hostpath-provisioner/csi-hostpathplugin-x2cf9"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015179 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/a8970b78-e931-4617-9b58-6845c95c48f4-node-bootstrap-token\") pod \"machine-config-server-g9gvx\" (UID: \"a8970b78-e931-4617-9b58-6845c95c48f4\") " pod="openshift-machine-config-operator/machine-config-server-g9gvx"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015222 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/a8970b78-e931-4617-9b58-6845c95c48f4-certs\") pod \"machine-config-server-g9gvx\" (UID: \"a8970b78-e931-4617-9b58-6845c95c48f4\") " pod="openshift-machine-config-operator/machine-config-server-g9gvx"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015243 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/569fa728-8c24-4618-9655-612ea2297aad-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-6g4w8\" (UID: \"569fa728-8c24-4618-9655-612ea2297aad\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6g4w8"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015262 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22aa627b-e824-4f54-8ee9-e9db1e7b7da3-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-92chf\" (UID: \"22aa627b-e824-4f54-8ee9-e9db1e7b7da3\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-92chf"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015282 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/66e9814a-2c50-4b11-9412-39e1fd445bc6-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-8r5qd\" (UID: \"66e9814a-2c50-4b11-9412-39e1fd445bc6\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8r5qd"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015329 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2-csi-data-dir\") pod \"csi-hostpathplugin-x2cf9\" (UID: \"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2\") " pod="hostpath-provisioner/csi-hostpathplugin-x2cf9"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015349 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/274aead1-3e11-4349-99be-32e19bfe7d78-registry-tls\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015366 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d72fe727-d902-4315-afb6-8a67d9df8c57-secret-volume\") pod \"collect-profiles-29395650-txjsl\" (UID: \"d72fe727-d902-4315-afb6-8a67d9df8c57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015406 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6d1972c2-1c3e-4e33-b70d-52fe89843453-bound-sa-token\") pod \"ingress-operator-5b745b69d9-w46k5\" (UID: \"6d1972c2-1c3e-4e33-b70d-52fe89843453\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015426 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/22aa627b-e824-4f54-8ee9-e9db1e7b7da3-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-92chf\" (UID: \"22aa627b-e824-4f54-8ee9-e9db1e7b7da3\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-92chf"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015466 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82dlq\" (UniqueName: \"kubernetes.io/projected/274aead1-3e11-4349-99be-32e19bfe7d78-kube-api-access-82dlq\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015609 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d-metrics-certs\") pod \"router-default-5444994796-gwx9x\" (UID: \"d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d\") " pod="openshift-ingress/router-default-5444994796-gwx9x"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015664 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ef9d0064-2b4f-416e-8300-8bdf07e2bd61-metrics-tls\") pod \"dns-default-dmk5z\" (UID: \"ef9d0064-2b4f-416e-8300-8bdf07e2bd61\") " pod="openshift-dns/dns-default-dmk5z"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015714 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d-stats-auth\") pod \"router-default-5444994796-gwx9x\" (UID: \"d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d\") " pod="openshift-ingress/router-default-5444994796-gwx9x"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015750 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b2ae3907-0438-4add-bdcf-0045ae419f0a-metrics-tls\") pod \"dns-operator-744455d44c-j66mb\" (UID: \"b2ae3907-0438-4add-bdcf-0045ae419f0a\") " pod="openshift-dns-operator/dns-operator-744455d44c-j66mb"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015782 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4z7jh\" (UniqueName: \"kubernetes.io/projected/7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2-kube-api-access-4z7jh\") pod \"csi-hostpathplugin-x2cf9\" (UID: \"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2\") " pod="hostpath-provisioner/csi-hostpathplugin-x2cf9"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015862 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mx58s\" (UniqueName: \"kubernetes.io/projected/66e9814a-2c50-4b11-9412-39e1fd445bc6-kube-api-access-mx58s\") pod \"package-server-manager-789f6589d5-8r5qd\" (UID: \"66e9814a-2c50-4b11-9412-39e1fd445bc6\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8r5qd"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015908 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4rmp\" (UniqueName: \"kubernetes.io/projected/d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d-kube-api-access-c4rmp\") pod \"router-default-5444994796-gwx9x\" (UID: \"d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d\") " pod="openshift-ingress/router-default-5444994796-gwx9x"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015947 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4l4f\" (UniqueName: \"kubernetes.io/projected/22aa627b-e824-4f54-8ee9-e9db1e7b7da3-kube-api-access-s4l4f\") pod \"kube-storage-version-migrator-operator-b67b599dd-92chf\" (UID: \"22aa627b-e824-4f54-8ee9-e9db1e7b7da3\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-92chf"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015968 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c191759b-e8ae-49d4-b2a1-b7acb2a54709-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-szs8c\" (UID: \"c191759b-e8ae-49d4-b2a1-b7acb2a54709\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-szs8c"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.015987 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3b7bdc2e-d91d-420e-b70b-445ca8c31faf-cert\") pod \"ingress-canary-dgdjr\" (UID: \"3b7bdc2e-d91d-420e-b70b-445ca8c31faf\") " pod="openshift-ingress-canary/ingress-canary-dgdjr"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.016035 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/54732f6f-2547-48d2-bb98-2e90611b48bf-images\") pod \"machine-config-operator-74547568cd-t97cr\" (UID: \"54732f6f-2547-48d2-bb98-2e90611b48bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.016056 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2-socket-dir\") pod \"csi-hostpathplugin-x2cf9\" (UID: \"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2\") " pod="hostpath-provisioner/csi-hostpathplugin-x2cf9"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.016126 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hgxn\" (UniqueName: \"kubernetes.io/projected/ef9d0064-2b4f-416e-8300-8bdf07e2bd61-kube-api-access-5hgxn\") pod \"dns-default-dmk5z\" (UID: \"ef9d0064-2b4f-416e-8300-8bdf07e2bd61\") " pod="openshift-dns/dns-default-dmk5z"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.016146 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mrbxj\" (UniqueName: \"kubernetes.io/projected/6d1972c2-1c3e-4e33-b70d-52fe89843453-kube-api-access-mrbxj\") pod \"ingress-operator-5b745b69d9-w46k5\" (UID: \"6d1972c2-1c3e-4e33-b70d-52fe89843453\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.016170 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkrzr\" (UniqueName: \"kubernetes.io/projected/3b7bdc2e-d91d-420e-b70b-445ca8c31faf-kube-api-access-jkrzr\") pod \"ingress-canary-dgdjr\" (UID: \"3b7bdc2e-d91d-420e-b70b-445ca8c31faf\") " pod="openshift-ingress-canary/ingress-canary-dgdjr"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.016190 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hn8k\" (UniqueName: \"kubernetes.io/projected/5c2198a6-561a-407b-979b-67d05acfb234-kube-api-access-9hn8k\") pod \"control-plane-machine-set-operator-78cbb6b69f-tz287\" (UID: \"5c2198a6-561a-407b-979b-67d05acfb234\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tz287"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.016213 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/274aead1-3e11-4349-99be-32e19bfe7d78-registry-certificates\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.016232 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/274aead1-3e11-4349-99be-32e19bfe7d78-bound-sa-token\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.018089 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22aa627b-e824-4f54-8ee9-e9db1e7b7da3-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-92chf\" (UID: \"22aa627b-e824-4f54-8ee9-e9db1e7b7da3\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-92chf"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.019583 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/54732f6f-2547-48d2-bb98-2e90611b48bf-images\") pod \"machine-config-operator-74547568cd-t97cr\" (UID: \"54732f6f-2547-48d2-bb98-2e90611b48bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.019981 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szh2n\" (UniqueName: \"kubernetes.io/projected/569fa728-8c24-4618-9655-612ea2297aad-kube-api-access-szh2n\") pod \"multus-admission-controller-857f4d67dd-6g4w8\" (UID: \"569fa728-8c24-4618-9655-612ea2297aad\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6g4w8"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.020143 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2-plugins-dir\") pod \"csi-hostpathplugin-x2cf9\" (UID: \"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2\") " pod="hostpath-provisioner/csi-hostpathplugin-x2cf9"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.020164 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/569fa728-8c24-4618-9655-612ea2297aad-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-6g4w8\" (UID: \"569fa728-8c24-4618-9655-612ea2297aad\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6g4w8"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.020182 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d72fe727-d902-4315-afb6-8a67d9df8c57-config-volume\") pod \"collect-profiles-29395650-txjsl\" (UID: \"d72fe727-d902-4315-afb6-8a67d9df8c57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.020367 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/274aead1-3e11-4349-99be-32e19bfe7d78-ca-trust-extracted\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.020760 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d-default-certificate\") pod \"router-default-5444994796-gwx9x\" (UID: \"d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d\") " pod="openshift-ingress/router-default-5444994796-gwx9x"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.020785 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/274aead1-3e11-4349-99be-32e19bfe7d78-ca-trust-extracted\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.020806 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c191759b-e8ae-49d4-b2a1-b7acb2a54709-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-szs8c\" (UID: \"c191759b-e8ae-49d4-b2a1-b7acb2a54709\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-szs8c"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.020832 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwfvt\" (UniqueName: \"kubernetes.io/projected/a8970b78-e931-4617-9b58-6845c95c48f4-kube-api-access-qwfvt\") pod \"machine-config-server-g9gvx\" (UID: \"a8970b78-e931-4617-9b58-6845c95c48f4\") " pod="openshift-machine-config-operator/machine-config-server-g9gvx"
Nov 21 15:37:33 crc kubenswrapper[4967]: E1121 15:37:33.020911 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:33.52089376 +0000 UTC m=+141.779414768 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.022275 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/274aead1-3e11-4349-99be-32e19bfe7d78-installation-pull-secrets\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.023891 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/274aead1-3e11-4349-99be-32e19bfe7d78-registry-tls\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.026019 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c191759b-e8ae-49d4-b2a1-b7acb2a54709-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-szs8c\" (UID: \"c191759b-e8ae-49d4-b2a1-b7acb2a54709\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-szs8c"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.027707 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ef9d0064-2b4f-416e-8300-8bdf07e2bd61-config-volume\") pod \"dns-default-dmk5z\" (UID: \"ef9d0064-2b4f-416e-8300-8bdf07e2bd61\") " pod="openshift-dns/dns-default-dmk5z"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.027616 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6d1972c2-1c3e-4e33-b70d-52fe89843453-trusted-ca\") pod \"ingress-operator-5b745b69d9-w46k5\" (UID: \"6d1972c2-1c3e-4e33-b70d-52fe89843453\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5"
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.027644 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/22aa627b-e824-4f54-8ee9-e9db1e7b7da3-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-92chf\" (UID: \"22aa627b-e824-4f54-8ee9-e9db1e7b7da3\") "
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-92chf" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.027764 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phnnf\" (UniqueName: \"kubernetes.io/projected/ad8312ab-9169-4277-99d0-2525382013b7-kube-api-access-phnnf\") pod \"service-ca-operator-777779d784-zl28t\" (UID: \"ad8312ab-9169-4277-99d0-2525382013b7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-zl28t" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.027899 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c191759b-e8ae-49d4-b2a1-b7acb2a54709-config\") pod \"kube-apiserver-operator-766d6c64bb-szs8c\" (UID: \"c191759b-e8ae-49d4-b2a1-b7acb2a54709\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-szs8c" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.027280 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d-stats-auth\") pod \"router-default-5444994796-gwx9x\" (UID: \"d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d\") " pod="openshift-ingress/router-default-5444994796-gwx9x" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.028345 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58762\" (UniqueName: \"kubernetes.io/projected/b2ae3907-0438-4add-bdcf-0045ae419f0a-kube-api-access-58762\") pod \"dns-operator-744455d44c-j66mb\" (UID: \"b2ae3907-0438-4add-bdcf-0045ae419f0a\") " pod="openshift-dns-operator/dns-operator-744455d44c-j66mb" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.028483 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/54732f6f-2547-48d2-bb98-2e90611b48bf-proxy-tls\") pod \"machine-config-operator-74547568cd-t97cr\" (UID: \"54732f6f-2547-48d2-bb98-2e90611b48bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.028844 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/ef9d0064-2b4f-416e-8300-8bdf07e2bd61-metrics-tls\") pod \"dns-default-dmk5z\" (UID: \"ef9d0064-2b4f-416e-8300-8bdf07e2bd61\") " pod="openshift-dns/dns-default-dmk5z" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.029009 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6slv\" (UniqueName: \"kubernetes.io/projected/d72fe727-d902-4315-afb6-8a67d9df8c57-kube-api-access-v6slv\") pod \"collect-profiles-29395650-txjsl\" (UID: \"d72fe727-d902-4315-afb6-8a67d9df8c57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.029284 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/274aead1-3e11-4349-99be-32e19bfe7d78-trusted-ca\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.029492 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" 
(UniqueName: \"kubernetes.io/configmap/274aead1-3e11-4349-99be-32e19bfe7d78-registry-certificates\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.033781 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/274aead1-3e11-4349-99be-32e19bfe7d78-installation-pull-secrets\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.037490 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d72fe727-d902-4315-afb6-8a67d9df8c57-secret-volume\") pod \"collect-profiles-29395650-txjsl\" (UID: \"d72fe727-d902-4315-afb6-8a67d9df8c57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.037534 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d-metrics-certs\") pod \"router-default-5444994796-gwx9x\" (UID: \"d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d\") " pod="openshift-ingress/router-default-5444994796-gwx9x" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.039334 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b2ae3907-0438-4add-bdcf-0045ae419f0a-metrics-tls\") pod \"dns-operator-744455d44c-j66mb\" (UID: \"b2ae3907-0438-4add-bdcf-0045ae419f0a\") " pod="openshift-dns-operator/dns-operator-744455d44c-j66mb" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.041590 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82dlq\" (UniqueName: \"kubernetes.io/projected/274aead1-3e11-4349-99be-32e19bfe7d78-kube-api-access-82dlq\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.041659 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d-default-certificate\") pod \"router-default-5444994796-gwx9x\" (UID: \"d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d\") " pod="openshift-ingress/router-default-5444994796-gwx9x" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.044403 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/274aead1-3e11-4349-99be-32e19bfe7d78-trusted-ca\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.045174 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/5c2198a6-561a-407b-979b-67d05acfb234-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-tz287\" (UID: \"5c2198a6-561a-407b-979b-67d05acfb234\") " 
pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tz287" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.045245 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ad8312ab-9169-4277-99d0-2525382013b7-serving-cert\") pod \"service-ca-operator-777779d784-zl28t\" (UID: \"ad8312ab-9169-4277-99d0-2525382013b7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-zl28t" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.045287 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d-service-ca-bundle\") pod \"router-default-5444994796-gwx9x\" (UID: \"d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d\") " pod="openshift-ingress/router-default-5444994796-gwx9x" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.045337 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6d1972c2-1c3e-4e33-b70d-52fe89843453-metrics-tls\") pod \"ingress-operator-5b745b69d9-w46k5\" (UID: \"6d1972c2-1c3e-4e33-b70d-52fe89843453\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.045373 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/54732f6f-2547-48d2-bb98-2e90611b48bf-auth-proxy-config\") pod \"machine-config-operator-74547568cd-t97cr\" (UID: \"54732f6f-2547-48d2-bb98-2e90611b48bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.045405 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jrhg\" (UniqueName: \"kubernetes.io/projected/54732f6f-2547-48d2-bb98-2e90611b48bf-kube-api-access-2jrhg\") pod \"machine-config-operator-74547568cd-t97cr\" (UID: \"54732f6f-2547-48d2-bb98-2e90611b48bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.045551 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad8312ab-9169-4277-99d0-2525382013b7-config\") pod \"service-ca-operator-777779d784-zl28t\" (UID: \"ad8312ab-9169-4277-99d0-2525382013b7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-zl28t" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.047086 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d-service-ca-bundle\") pod \"router-default-5444994796-gwx9x\" (UID: \"d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d\") " pod="openshift-ingress/router-default-5444994796-gwx9x" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.051686 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/5c2198a6-561a-407b-979b-67d05acfb234-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-tz287\" (UID: \"5c2198a6-561a-407b-979b-67d05acfb234\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tz287" Nov 21 
15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.052031 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-xdn6j" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.052234 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/66e9814a-2c50-4b11-9412-39e1fd445bc6-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-8r5qd\" (UID: \"66e9814a-2c50-4b11-9412-39e1fd445bc6\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8r5qd" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.056534 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ef9d0064-2b4f-416e-8300-8bdf07e2bd61-config-volume\") pod \"dns-default-dmk5z\" (UID: \"ef9d0064-2b4f-416e-8300-8bdf07e2bd61\") " pod="openshift-dns/dns-default-dmk5z" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.057251 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/54732f6f-2547-48d2-bb98-2e90611b48bf-auth-proxy-config\") pod \"machine-config-operator-74547568cd-t97cr\" (UID: \"54732f6f-2547-48d2-bb98-2e90611b48bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.080035 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-zcvrf"] Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.081147 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj"] Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.093921 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6d1972c2-1c3e-4e33-b70d-52fe89843453-metrics-tls\") pod \"ingress-operator-5b745b69d9-w46k5\" (UID: \"6d1972c2-1c3e-4e33-b70d-52fe89843453\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.097077 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrbxj\" (UniqueName: \"kubernetes.io/projected/6d1972c2-1c3e-4e33-b70d-52fe89843453-kube-api-access-mrbxj\") pod \"ingress-operator-5b745b69d9-w46k5\" (UID: \"6d1972c2-1c3e-4e33-b70d-52fe89843453\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.105707 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4rmp\" (UniqueName: \"kubernetes.io/projected/d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d-kube-api-access-c4rmp\") pod \"router-default-5444994796-gwx9x\" (UID: \"d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d\") " pod="openshift-ingress/router-default-5444994796-gwx9x" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.117788 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c191759b-e8ae-49d4-b2a1-b7acb2a54709-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-szs8c\" (UID: \"c191759b-e8ae-49d4-b2a1-b7acb2a54709\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-szs8c" Nov 21 
15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.149108 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ad8312ab-9169-4277-99d0-2525382013b7-serving-cert\") pod \"service-ca-operator-777779d784-zl28t\" (UID: \"ad8312ab-9169-4277-99d0-2525382013b7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-zl28t" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.149203 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad8312ab-9169-4277-99d0-2525382013b7-config\") pod \"service-ca-operator-777779d784-zl28t\" (UID: \"ad8312ab-9169-4277-99d0-2525382013b7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-zl28t" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.149264 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2-registration-dir\") pod \"csi-hostpathplugin-x2cf9\" (UID: \"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2\") " pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.149299 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2-mountpoint-dir\") pod \"csi-hostpathplugin-x2cf9\" (UID: \"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2\") " pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.149355 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/a8970b78-e931-4617-9b58-6845c95c48f4-node-bootstrap-token\") pod \"machine-config-server-g9gvx\" (UID: \"a8970b78-e931-4617-9b58-6845c95c48f4\") " pod="openshift-machine-config-operator/machine-config-server-g9gvx" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.149769 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/a8970b78-e931-4617-9b58-6845c95c48f4-certs\") pod \"machine-config-server-g9gvx\" (UID: \"a8970b78-e931-4617-9b58-6845c95c48f4\") " pod="openshift-machine-config-operator/machine-config-server-g9gvx" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.149811 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2-csi-data-dir\") pod \"csi-hostpathplugin-x2cf9\" (UID: \"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2\") " pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.149897 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4z7jh\" (UniqueName: \"kubernetes.io/projected/7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2-kube-api-access-4z7jh\") pod \"csi-hostpathplugin-x2cf9\" (UID: \"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2\") " pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.149950 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3b7bdc2e-d91d-420e-b70b-445ca8c31faf-cert\") pod \"ingress-canary-dgdjr\" (UID: \"3b7bdc2e-d91d-420e-b70b-445ca8c31faf\") " 
pod="openshift-ingress-canary/ingress-canary-dgdjr" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.149995 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2-socket-dir\") pod \"csi-hostpathplugin-x2cf9\" (UID: \"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2\") " pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.150059 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.150386 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkrzr\" (UniqueName: \"kubernetes.io/projected/3b7bdc2e-d91d-420e-b70b-445ca8c31faf-kube-api-access-jkrzr\") pod \"ingress-canary-dgdjr\" (UID: \"3b7bdc2e-d91d-420e-b70b-445ca8c31faf\") " pod="openshift-ingress-canary/ingress-canary-dgdjr" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.150449 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2-plugins-dir\") pod \"csi-hostpathplugin-x2cf9\" (UID: \"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2\") " pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.150597 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwfvt\" (UniqueName: \"kubernetes.io/projected/a8970b78-e931-4617-9b58-6845c95c48f4-kube-api-access-qwfvt\") pod \"machine-config-server-g9gvx\" (UID: \"a8970b78-e931-4617-9b58-6845c95c48f4\") " pod="openshift-machine-config-operator/machine-config-server-g9gvx" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.150674 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phnnf\" (UniqueName: \"kubernetes.io/projected/ad8312ab-9169-4277-99d0-2525382013b7-kube-api-access-phnnf\") pod \"service-ca-operator-777779d784-zl28t\" (UID: \"ad8312ab-9169-4277-99d0-2525382013b7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-zl28t" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.157305 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2-mountpoint-dir\") pod \"csi-hostpathplugin-x2cf9\" (UID: \"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2\") " pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.157498 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad8312ab-9169-4277-99d0-2525382013b7-config\") pod \"service-ca-operator-777779d784-zl28t\" (UID: \"ad8312ab-9169-4277-99d0-2525382013b7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-zl28t" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.157759 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: 
\"kubernetes.io/host-path/7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2-registration-dir\") pod \"csi-hostpathplugin-x2cf9\" (UID: \"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2\") " pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.158292 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2-socket-dir\") pod \"csi-hostpathplugin-x2cf9\" (UID: \"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2\") " pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" Nov 21 15:37:33 crc kubenswrapper[4967]: E1121 15:37:33.158403 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:33.658382054 +0000 UTC m=+141.916903062 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.158430 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2-csi-data-dir\") pod \"csi-hostpathplugin-x2cf9\" (UID: \"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2\") " pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.158508 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2-plugins-dir\") pod \"csi-hostpathplugin-x2cf9\" (UID: \"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2\") " pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.160141 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hn8k\" (UniqueName: \"kubernetes.io/projected/5c2198a6-561a-407b-979b-67d05acfb234-kube-api-access-9hn8k\") pod \"control-plane-machine-set-operator-78cbb6b69f-tz287\" (UID: \"5c2198a6-561a-407b-979b-67d05acfb234\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tz287" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.163163 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/a8970b78-e931-4617-9b58-6845c95c48f4-certs\") pod \"machine-config-server-g9gvx\" (UID: \"a8970b78-e931-4617-9b58-6845c95c48f4\") " pod="openshift-machine-config-operator/machine-config-server-g9gvx" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.184740 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tz287" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.185510 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-sxsl5"] Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.192363 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/a8970b78-e931-4617-9b58-6845c95c48f4-node-bootstrap-token\") pod \"machine-config-server-g9gvx\" (UID: \"a8970b78-e931-4617-9b58-6845c95c48f4\") " pod="openshift-machine-config-operator/machine-config-server-g9gvx" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.195805 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4l4f\" (UniqueName: \"kubernetes.io/projected/22aa627b-e824-4f54-8ee9-e9db1e7b7da3-kube-api-access-s4l4f\") pod \"kube-storage-version-migrator-operator-b67b599dd-92chf\" (UID: \"22aa627b-e824-4f54-8ee9-e9db1e7b7da3\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-92chf" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.203386 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szh2n\" (UniqueName: \"kubernetes.io/projected/569fa728-8c24-4618-9655-612ea2297aad-kube-api-access-szh2n\") pod \"multus-admission-controller-857f4d67dd-6g4w8\" (UID: \"569fa728-8c24-4618-9655-612ea2297aad\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-6g4w8" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.210725 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6d1972c2-1c3e-4e33-b70d-52fe89843453-bound-sa-token\") pod \"ingress-operator-5b745b69d9-w46k5\" (UID: \"6d1972c2-1c3e-4e33-b70d-52fe89843453\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.211525 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l"] Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.216480 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq"] Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.219536 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-lb8zd"] Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.223251 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ad8312ab-9169-4277-99d0-2525382013b7-serving-cert\") pod \"service-ca-operator-777779d784-zl28t\" (UID: \"ad8312ab-9169-4277-99d0-2525382013b7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-zl28t" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.223384 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3b7bdc2e-d91d-420e-b70b-445ca8c31faf-cert\") pod \"ingress-canary-dgdjr\" (UID: \"3b7bdc2e-d91d-420e-b70b-445ca8c31faf\") " pod="openshift-ingress-canary/ingress-canary-dgdjr" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.256104 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:33 crc kubenswrapper[4967]: E1121 15:37:33.257046 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:33.757021657 +0000 UTC m=+142.015542665 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.257266 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-km26f"] Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.269339 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mx58s\" (UniqueName: \"kubernetes.io/projected/66e9814a-2c50-4b11-9412-39e1fd445bc6-kube-api-access-mx58s\") pod \"package-server-manager-789f6589d5-8r5qd\" (UID: \"66e9814a-2c50-4b11-9412-39e1fd445bc6\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8r5qd" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.277496 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hgxn\" (UniqueName: \"kubernetes.io/projected/ef9d0064-2b4f-416e-8300-8bdf07e2bd61-kube-api-access-5hgxn\") pod \"dns-default-dmk5z\" (UID: \"ef9d0064-2b4f-416e-8300-8bdf07e2bd61\") " pod="openshift-dns/dns-default-dmk5z" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.279723 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/274aead1-3e11-4349-99be-32e19bfe7d78-bound-sa-token\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.294769 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58762\" (UniqueName: \"kubernetes.io/projected/b2ae3907-0438-4add-bdcf-0045ae419f0a-kube-api-access-58762\") pod \"dns-operator-744455d44c-j66mb\" (UID: \"b2ae3907-0438-4add-bdcf-0045ae419f0a\") " pod="openshift-dns-operator/dns-operator-744455d44c-j66mb" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.301699 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6slv\" (UniqueName: \"kubernetes.io/projected/d72fe727-d902-4315-afb6-8a67d9df8c57-kube-api-access-v6slv\") pod \"collect-profiles-29395650-txjsl\" (UID: \"d72fe727-d902-4315-afb6-8a67d9df8c57\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.310530 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-6g4w8" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.314993 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m45jq" event={"ID":"daf11197-7c7a-4a0e-8c7d-de8047b53fe7","Type":"ContainerStarted","Data":"4b144cab1441f8f6651534ddc94421be9082e28042f5e86123653ffc8bf8cc82"} Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.315047 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m45jq" event={"ID":"daf11197-7c7a-4a0e-8c7d-de8047b53fe7","Type":"ContainerStarted","Data":"f2668dfc7794650b5f8d3135c4bd20d6eadf4571c225176addad9333497be53e"} Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.341027 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jrhg\" (UniqueName: \"kubernetes.io/projected/54732f6f-2547-48d2-bb98-2e90611b48bf-kube-api-access-2jrhg\") pod \"machine-config-operator-74547568cd-t97cr\" (UID: \"54732f6f-2547-48d2-bb98-2e90611b48bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.341865 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-gwx9x" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.358394 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-w6tpt" event={"ID":"51f4aa6d-cb8f-4b69-a8f8-61fd52bf3c1d","Type":"ContainerStarted","Data":"ac40157dd6166dfdc6981a6b93306323a080cc547faa6d47e806df8ad2847834"} Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.358475 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-w6tpt" event={"ID":"51f4aa6d-cb8f-4b69-a8f8-61fd52bf3c1d","Type":"ContainerStarted","Data":"737e7a61a40afde127049854ca73d53fa4f204ed44c613d0768d25d0958a4043"} Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.358487 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-w6tpt" event={"ID":"51f4aa6d-cb8f-4b69-a8f8-61fd52bf3c1d","Type":"ContainerStarted","Data":"50a7ea2eeb50c46c4cf2b5af5bc5875b041c6de52ef5f4aa205a30674c805974"} Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.360299 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:33 crc kubenswrapper[4967]: E1121 15:37:33.361431 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:33.861404908 +0000 UTC m=+142.119925916 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.361692 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-szs8c" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.369529 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4z7jh\" (UniqueName: \"kubernetes.io/projected/7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2-kube-api-access-4z7jh\") pod \"csi-hostpathplugin-x2cf9\" (UID: \"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2\") " pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.383093 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-x7hlr" event={"ID":"e1b88a76-dfa5-4273-a7ab-6c0824308b04","Type":"ContainerStarted","Data":"eeb2b4908b2ceb5ea60f9dab75e4650d2529a4f17eab96703a03acf2dbe41133"} Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.386898 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkrzr\" (UniqueName: \"kubernetes.io/projected/3b7bdc2e-d91d-420e-b70b-445ca8c31faf-kube-api-access-jkrzr\") pod \"ingress-canary-dgdjr\" (UID: \"3b7bdc2e-d91d-420e-b70b-445ca8c31faf\") " pod="openshift-ingress-canary/ingress-canary-dgdjr" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.397006 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.426918 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj" event={"ID":"91fcf3a6-63c0-41f9-a864-95f15bc9bbe6","Type":"ContainerStarted","Data":"e0119e5372f2b94aa2bf0d0d79a9e00eaeefb2e456155260425da460d89193ef"} Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.431392 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-tdgx7" event={"ID":"4c12eeb4-d087-4e18-a9b0-0a2211a6128d","Type":"ContainerStarted","Data":"fc8b2e6c9a3ebc0c1e031228ca19132bcb8662438b8c017902fe63f90b438559"} Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.434890 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phnnf\" (UniqueName: \"kubernetes.io/projected/ad8312ab-9169-4277-99d0-2525382013b7-kube-api-access-phnnf\") pod \"service-ca-operator-777779d784-zl28t\" (UID: \"ad8312ab-9169-4277-99d0-2525382013b7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-zl28t" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.435561 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8r5qd" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.441676 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwfvt\" (UniqueName: \"kubernetes.io/projected/a8970b78-e931-4617-9b58-6845c95c48f4-kube-api-access-qwfvt\") pod \"machine-config-server-g9gvx\" (UID: \"a8970b78-e931-4617-9b58-6845c95c48f4\") " pod="openshift-machine-config-operator/machine-config-server-g9gvx" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.444095 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-92chf" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.447056 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" event={"ID":"1def5571-fff5-47d8-b9bd-13ee21c73760","Type":"ContainerStarted","Data":"4c95e49451563c84ef0984e57a9359488f85dfd2faeedd8070417de4f98e2c5a"} Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.447122 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" event={"ID":"1def5571-fff5-47d8-b9bd-13ee21c73760","Type":"ContainerStarted","Data":"68e789e782defb92a21d8a3484d33dbefe1ab20515554d088d63ec798724e58b"} Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.447475 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.455711 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-j66mb" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.457863 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dd6s7" event={"ID":"52d8fd4a-44ee-47d6-8f16-2b41728f7f1c","Type":"ContainerStarted","Data":"a436d391640747efc05bd8d991d412d1d5e0a9976c5314c3cc2b1428df8624b3"} Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.457926 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dd6s7" event={"ID":"52d8fd4a-44ee-47d6-8f16-2b41728f7f1c","Type":"ContainerStarted","Data":"ea2fa2a9525d44ae94506c3357ab64a9226e420e84a99ef6f0c20267a860bd60"} Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.462916 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:33 crc kubenswrapper[4967]: E1121 15:37:33.463409 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:33.963376758 +0000 UTC m=+142.221897776 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.463798 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:33 crc kubenswrapper[4967]: E1121 15:37:33.466416 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:33.966402066 +0000 UTC m=+142.224923064 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.472268 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" event={"ID":"944c0231-2382-4fc2-9e88-e83b473045f8","Type":"ContainerStarted","Data":"5c67a4dc9ce69cdcff6e451aacf738ff2c27308908d6211365f69bbe85631a6e"} Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.475149 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.502910 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-dmk5z" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.516966 4967 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-4wxnm container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.517027 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" podUID="1def5571-fff5-47d8-b9bd-13ee21c73760" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.527796 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-zl28t" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.531680 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-dgdjr" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.540115 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-g9gvx" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.567124 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:33 crc kubenswrapper[4967]: E1121 15:37:33.569352 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:34.069302213 +0000 UTC m=+142.327823221 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.570287 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.570774 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-q2z9m"] Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.573787 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mv55f"] Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.623860 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr" Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.670500 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:33 crc kubenswrapper[4967]: E1121 15:37:33.670975 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:34.170962254 +0000 UTC m=+142.429483262 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.706332 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth"] Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.778537 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:33 crc kubenswrapper[4967]: E1121 15:37:33.779218 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:34.279193476 +0000 UTC m=+142.537714484 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:33 crc kubenswrapper[4967]: W1121 15:37:33.795594 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod228df940_1987_44bb_a72e_944bafc22d91.slice/crio-5894efe29da0a158c07525d65dedac4d4dcd125983ac552768f8b92ffc089560 WatchSource:0}: Error finding container 5894efe29da0a158c07525d65dedac4d4dcd125983ac552768f8b92ffc089560: Status 404 returned error can't find the container with id 5894efe29da0a158c07525d65dedac4d4dcd125983ac552768f8b92ffc089560 Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.815585 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w"] Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.885058 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:33 crc kubenswrapper[4967]: E1121 15:37:33.886073 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:34.386055429 +0000 UTC m=+142.644576437 (durationBeforeRetry 500ms). 
Nov 21 15:37:33 crc kubenswrapper[4967]: I1121 15:37:33.993458 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 21 15:37:33 crc kubenswrapper[4967]: E1121 15:37:33.993839 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:34.493822458 +0000 UTC m=+142.752343466 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.011758 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-dd6s7" podStartSLOduration=118.01173537 podStartE2EDuration="1m58.01173537s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:33.970846049 +0000 UTC m=+142.229367057" watchObservedRunningTime="2025-11-21 15:37:34.01173537 +0000 UTC m=+142.270256378"
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.096048 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:34 crc kubenswrapper[4967]: E1121 15:37:34.096462 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:34.596450137 +0000 UTC m=+142.854971145 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.136910 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gljzt"]
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.175547 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c5fls"]
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.197755 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.197837 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" podStartSLOduration=118.1978121 podStartE2EDuration="1m58.1978121s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:34.19646265 +0000 UTC m=+142.454983658" watchObservedRunningTime="2025-11-21 15:37:34.1978121 +0000 UTC m=+142.456333108"
Nov 21 15:37:34 crc kubenswrapper[4967]: E1121 15:37:34.198223 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:34.698200051 +0000 UTC m=+142.956721059 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.290220 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-m45jq" podStartSLOduration=118.290201631 podStartE2EDuration="1m58.290201631s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:34.284871565 +0000 UTC m=+142.543392573" watchObservedRunningTime="2025-11-21 15:37:34.290201631 +0000 UTC m=+142.548722639"
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.299515 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:34 crc kubenswrapper[4967]: E1121 15:37:34.299934 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:34.799920714 +0000 UTC m=+143.058441712 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.402241 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 21 15:37:34 crc kubenswrapper[4967]: E1121 15:37:34.402720 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:34.902670736 +0000 UTC m=+143.161191744 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.403302 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:34 crc kubenswrapper[4967]: E1121 15:37:34.403752 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:34.903738878 +0000 UTC m=+143.162260056 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.504667 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 21 15:37:34 crc kubenswrapper[4967]: E1121 15:37:34.505109 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:35.005069189 +0000 UTC m=+143.263590207 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.523747 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-w6tpt" podStartSLOduration=118.523721142 podStartE2EDuration="1m58.523721142s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:34.517030937 +0000 UTC m=+142.775551945" watchObservedRunningTime="2025-11-21 15:37:34.523721142 +0000 UTC m=+142.782242150"
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.524692 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" event={"ID":"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e","Type":"ContainerStarted","Data":"2d9594b8a423498e5bc3fd32d448ee64dc9007ec94dddaf14e1b4629908157f5"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.607225 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:34 crc kubenswrapper[4967]: E1121 15:37:34.607783 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:35.10776739 +0000 UTC m=+143.366288398 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.643840 4967 patch_prober.go:28] interesting pod/console-operator-58897d9998-x7hlr container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body=
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.643914 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-x7hlr" podUID="e1b88a76-dfa5-4273-a7ab-6c0824308b04" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused"
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.653196 4967 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-v9g6l container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body=
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.653258 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" podUID="14b6d0fb-f8e7-4ed0-81e6-7aaacb172807" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused"
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.682865 4967 patch_prober.go:28] interesting pod/downloads-7954f5f757-tdgx7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body=
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.682953 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-tdgx7" podUID="4c12eeb4-d087-4e18-a9b0-0a2211a6128d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused"
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.713141 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 21 15:37:34 crc kubenswrapper[4967]: E1121 15:37:34.715164 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:35.214803218 +0000 UTC m=+143.473324226 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
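[editor's note] The probe failures above ("connect: connection refused" against 10.217.0.x pod IPs) are the kubelet's readiness prober reaching servers that have not bound their ports yet; during a mass startup like this they are expected and clear once the containers begin listening, as the "SyncLoop (probe)" transitions that follow show. A rough Go sketch of what such an HTTP readiness check amounts to, with the URL taken from the log; it mirrors the kubelet prober's practice of skipping TLS verification for HTTPS probes but simplifies everything else:

    package main

    import (
        "crypto/tls"
        "fmt"
        "net/http"
        "time"
    )

    // ready reports whether an HTTP(S) endpoint answers with a success code.
    // A dial error such as "connect: connection refused" means nothing is
    // listening yet, so the pod is marked not-ready and probed again later.
    func ready(url string) bool {
        client := &http.Client{
            Timeout: time.Second,
            Transport: &http.Transport{
                TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
            },
        }
        resp, err := client.Get(url)
        if err != nil {
            return false
        }
        defer resp.Body.Close()
        return resp.StatusCode >= 200 && resp.StatusCode < 400
    }

    func main() {
        fmt.Println(ready("https://10.217.0.11:8443/readyz"))
    }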
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.728928 4967 generic.go:334] "Generic (PLEG): container finished" podID="944c0231-2382-4fc2-9e88-e83b473045f8" containerID="e623b7e178938cb9a2dad3f9e973144e80b4b0bb7bcb3418a88a673cfbb8413c" exitCode=0
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.750882 4967 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-k6mnj container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body=
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.751013 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj" podUID="91fcf3a6-63c0-41f9-a864-95f15bc9bbe6" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused"
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811515 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-x7hlr"
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811560 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l"
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811573 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-tdgx7"
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811583 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj"
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811613 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm"
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811626 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-km26f" event={"ID":"28189f52-065e-4d6f-b959-6a052477f10f","Type":"ContainerStarted","Data":"9336af3eeb5da89a22954f0b30692e3bd68c4b825eacb393196c8e5a24696a2e"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811664 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gljzt" event={"ID":"a02f9e5c-5484-4406-89c3-f7803420a47e","Type":"ContainerStarted","Data":"5ff73fb75c8b192c738d71feb2df1a73a8437071a3f15841d5768caada71fb2a"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811686 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq" event={"ID":"79cf3908-cb05-42a4-afd4-6e529e7d586d","Type":"ContainerStarted","Data":"f49ef1823699e29a237b805988e733a1544665ffa619ce0be68ba5bc96de41cd"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811701 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq" event={"ID":"79cf3908-cb05-42a4-afd4-6e529e7d586d","Type":"ContainerStarted","Data":"cf75af739be50397febf51c19684afe2580eb1dfd7d3607893e108aff750d9d9"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811717 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w" event={"ID":"3874fcd1-fa6e-4b2c-b9e3-ce42c0275521","Type":"ContainerStarted","Data":"fbba3cbcdb7034dbc8f40e4ab1662aadf68455dcdaf26daba9ede839e6dccaa0"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811730 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf" event={"ID":"aba5ef1a-6979-4625-af79-f51e8970c06a","Type":"ContainerStarted","Data":"0d1dbb6ab21a089b29659d4759758fb451abd1e4e91fbb777e8501eeeb4b861f"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811745 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-hlspg"]
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811762 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd"]
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811775 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-882pz"]
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811789 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ql9tj"]
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811801 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf" event={"ID":"aba5ef1a-6979-4625-af79-f51e8970c06a","Type":"ContainerStarted","Data":"da7a981cb61205095808c7032c82901d24c752c69954b030b5d3f260df5d3e5f"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811813 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-x7hlr" event={"ID":"e1b88a76-dfa5-4273-a7ab-6c0824308b04","Type":"ContainerStarted","Data":"6c5dbbd6d90050ee3a28dacfd334cc2d8208a8ea820379ac07cdcd4847031053"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811828 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" event={"ID":"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807","Type":"ContainerStarted","Data":"d97ba222b2d09fcffb95a8b1570d1b6715a8a80dc93a3cab2c934684e55889ce"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811842 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" event={"ID":"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807","Type":"ContainerStarted","Data":"1d69dc902918170b08c111beb5f0c739d5a0d7c1f33c567a550a67ac6f1ac57c"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811853 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-tdgx7" event={"ID":"4c12eeb4-d087-4e18-a9b0-0a2211a6128d","Type":"ContainerStarted","Data":"26314c8c6c161882632fa288da261a525630f5104d595d98e07e839ecc09306c"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811865 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-q2z9m" event={"ID":"228df940-1987-44bb-a72e-944bafc22d91","Type":"ContainerStarted","Data":"5894efe29da0a158c07525d65dedac4d4dcd125983ac552768f8b92ffc089560"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811879 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-g9gvx" event={"ID":"a8970b78-e931-4617-9b58-6845c95c48f4","Type":"ContainerStarted","Data":"0cd6f012382e8f12fb1674fef8f030197e7c22c21973a15d55e1eb736254cc05"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811894 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" event={"ID":"944c0231-2382-4fc2-9e88-e83b473045f8","Type":"ContainerDied","Data":"e623b7e178938cb9a2dad3f9e973144e80b4b0bb7bcb3418a88a673cfbb8413c"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811908 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj" event={"ID":"91fcf3a6-63c0-41f9-a864-95f15bc9bbe6","Type":"ContainerStarted","Data":"bfc9ea67d798e9c76503c6a93aad696ab1809c5ceefe61964457f6a578ea7e08"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811919 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sxsl5" event={"ID":"a515cddd-29b9-4ced-afe4-cb52ead0fa58","Type":"ContainerStarted","Data":"bf737b24f46e3f80c020d7fcad5bdb894d9ee2990f762d68510790bc453de6d1"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811928 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sxsl5" event={"ID":"a515cddd-29b9-4ced-afe4-cb52ead0fa58","Type":"ContainerStarted","Data":"358a8271c19cd88e001b4ac67296facdfde14e9afd4d636021658923c4eaa046"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811936 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" event={"ID":"396d79a1-4427-49b2-b16e-89fb27df71ec","Type":"ContainerStarted","Data":"8b75d047d039c13a59c4f0500a4e00bc8007fd245afd8e2482178dc5748ac70c"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811946 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-gwx9x" event={"ID":"d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d","Type":"ContainerStarted","Data":"87a9bf8a79481d4ef5b7b1472499531733b27bb35ccab6a73f0911c0c458c11b"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.811956 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth" event={"ID":"67ec7435-1c30-438c-8da5-8231ab6cf336","Type":"ContainerStarted","Data":"6cf90753871fb10f346c504e627cdf3036d0db44b5c010d8fdd4fee8781277cc"}
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.816673 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-l6k28"]
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.817941 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:34 crc kubenswrapper[4967]: E1121 15:37:34.819709 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:35.319684053 +0000 UTC m=+143.578205061 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.919456 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 21 15:37:34 crc kubenswrapper[4967]: E1121 15:37:34.926558 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:35.426526795 +0000 UTC m=+143.685047803 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:34 crc kubenswrapper[4967]: I1121 15:37:34.927826 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:34 crc kubenswrapper[4967]: E1121 15:37:34.931849 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:35.431813159 +0000 UTC m=+143.690334167 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.032069 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 21 15:37:35 crc kubenswrapper[4967]: E1121 15:37:35.032580 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:35.532550663 +0000 UTC m=+143.791071671 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.045667 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-xdn6j"]
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.135443 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:35 crc kubenswrapper[4967]: E1121 15:37:35.136123 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:35.636098909 +0000 UTC m=+143.894619917 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.208642 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-tdgx7" podStartSLOduration=119.208617481 podStartE2EDuration="1m59.208617481s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:35.19209961 +0000 UTC m=+143.450620618" watchObservedRunningTime="2025-11-21 15:37:35.208617481 +0000 UTC m=+143.467138519"
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.236701 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" podStartSLOduration=118.236677448 podStartE2EDuration="1m58.236677448s" podCreationTimestamp="2025-11-21 15:35:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:35.226570374 +0000 UTC m=+143.485091382" watchObservedRunningTime="2025-11-21 15:37:35.236677448 +0000 UTC m=+143.495198456"
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.237239 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 21 15:37:35 crc kubenswrapper[4967]: E1121 15:37:35.237571 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:35.737524493 +0000 UTC m=+143.996045491 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.237807 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:35 crc kubenswrapper[4967]: W1121 15:37:35.241364 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod67563d08_de10_4aac_bfdd_248379b85548.slice/crio-f21286e59e57d13559f2432ac68e05e381f18765596824ca9e043ef8d7f420dd WatchSource:0}: Error finding container f21286e59e57d13559f2432ac68e05e381f18765596824ca9e043ef8d7f420dd: Status 404 returned error can't find the container with id f21286e59e57d13559f2432ac68e05e381f18765596824ca9e043ef8d7f420dd
Nov 21 15:37:35 crc kubenswrapper[4967]: E1121 15:37:35.242218 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:35.742200719 +0000 UTC m=+144.000721727 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
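[editor's note] Every failed volume operation above is parked by nestedpendingoperations with "No retries permitted until ... (durationBeforeRetry 500ms)": the volume manager throttles retries per operation instead of spinning. A minimal sketch of that retry-with-backoff pattern using the apimachinery wait helpers; this is illustrative only, not the kubelet's actual code path, which tracks backoff per volume/pod operation key, and the 500ms figure is simply the initial delay seen in this log:

    package main

    import (
        "errors"
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
        attempt := 0
        // Start at the 500ms delay the log shows; grow the delay if failures persist.
        backoff := wait.Backoff{Duration: 500 * time.Millisecond, Factor: 2.0, Steps: 5}
        err := wait.ExponentialBackoff(backoff, func() (bool, error) {
            attempt++
            fmt.Printf("attempt %d: driver not yet in the registered CSI driver list\n", attempt)
            return false, nil // not done; wait out the next backoff step and retry
        })
        if errors.Is(err, wait.ErrWaitTimeout) {
            fmt.Println("retries exhausted; a real kubelet keeps the operation pending instead")
        }
    }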
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.264213 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tz287"]
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.325257 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-zcvrf" podStartSLOduration=119.325235338 podStartE2EDuration="1m59.325235338s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:35.317781721 +0000 UTC m=+143.576302729" watchObservedRunningTime="2025-11-21 15:37:35.325235338 +0000 UTC m=+143.583756346"
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.343131 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 21 15:37:35 crc kubenswrapper[4967]: E1121 15:37:35.346762 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:35.846740684 +0000 UTC m=+144.105261692 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.391198 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj" podStartSLOduration=119.391174058 podStartE2EDuration="1m59.391174058s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:35.37682498 +0000 UTC m=+143.635345998" watchObservedRunningTime="2025-11-21 15:37:35.391174058 +0000 UTC m=+143.649695066"
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.398199 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-szs8c"]
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.452435 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:35 crc kubenswrapper[4967]: E1121 15:37:35.452828 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:35.952814184 +0000 UTC m=+144.211335192 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.456966 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-6g4w8"]
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.548195 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-j66mb"]
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.548282 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5"]
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.553513 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 21 15:37:35 crc kubenswrapper[4967]: E1121 15:37:35.553993 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:36.05397607 +0000 UTC m=+144.312497078 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.565144 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-92chf"]
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.566808 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-x7hlr" podStartSLOduration=119.566783273 podStartE2EDuration="1m59.566783273s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:35.493683654 +0000 UTC m=+143.752204662" watchObservedRunningTime="2025-11-21 15:37:35.566783273 +0000 UTC m=+143.825304291"
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.585499 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-q4dqq" podStartSLOduration=119.585472428 podStartE2EDuration="1m59.585472428s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:35.537420338 +0000 UTC m=+143.795941346" watchObservedRunningTime="2025-11-21 15:37:35.585472428 +0000 UTC m=+143.843993436"
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.589615 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-zl28t"]
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.605200 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8r5qd"]
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.613574 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl"]
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.629096 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-dmk5z"]
Nov 21 15:37:35 crc kubenswrapper[4967]: W1121 15:37:35.642189 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod22aa627b_e824_4f54_8ee9_e9db1e7b7da3.slice/crio-ae34939903bf96cbbf58736f304bd56b7290f6875782edd65ed2b03f0b9da67c WatchSource:0}: Error finding container ae34939903bf96cbbf58736f304bd56b7290f6875782edd65ed2b03f0b9da67c: Status 404 returned error can't find the container with id ae34939903bf96cbbf58736f304bd56b7290f6875782edd65ed2b03f0b9da67c
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.661976 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:35 crc kubenswrapper[4967]: E1121 15:37:35.662516 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:36.162501271 +0000 UTC m=+144.421022279 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.687718 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-dgdjr"]
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.762966 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 21 15:37:35 crc kubenswrapper[4967]: E1121 15:37:35.763228 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:36.263193374 +0000 UTC m=+144.521714382 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.763502 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:35 crc kubenswrapper[4967]: E1121 15:37:35.764045 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:36.264036909 +0000 UTC m=+144.522557907 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.802143 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-882pz" event={"ID":"70fb4095-863d-445d-bc3a-bdb264c4abc1","Type":"ContainerStarted","Data":"b13ceb068949e6a1ce6b957d9c8de2ae28f6e83128c26b5f430a6ff5a07da5c2"}
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.818063 4967 generic.go:334] "Generic (PLEG): container finished" podID="28189f52-065e-4d6f-b959-6a052477f10f" containerID="0eeb69cdbb62ee844cc08e51913ee354c8f7d8f7c40b6a65a49bbadf602b3235" exitCode=0
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.818186 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-km26f" event={"ID":"28189f52-065e-4d6f-b959-6a052477f10f","Type":"ContainerDied","Data":"0eeb69cdbb62ee844cc08e51913ee354c8f7d8f7c40b6a65a49bbadf602b3235"}
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.824131 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-hlspg" event={"ID":"dc8caa89-0bb5-4c35-9bbe-f1212a715f59","Type":"ContainerStarted","Data":"0ca7b432eefddf473c74f179656b7755b9a34dc68de6670648f5a0cf77508b0c"}
Nov 21 15:37:35 crc kubenswrapper[4967]: W1121 15:37:35.833065 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef9d0064_2b4f_416e_8300_8bdf07e2bd61.slice/crio-63b30bed1fd34ea0675cdd0004575b73bbf01037e6346ad2bafdf332686923ed WatchSource:0}: Error finding container 63b30bed1fd34ea0675cdd0004575b73bbf01037e6346ad2bafdf332686923ed: Status 404 returned error can't find the container with id 63b30bed1fd34ea0675cdd0004575b73bbf01037e6346ad2bafdf332686923ed
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.836832 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-92chf" event={"ID":"22aa627b-e824-4f54-8ee9-e9db1e7b7da3","Type":"ContainerStarted","Data":"ae34939903bf96cbbf58736f304bd56b7290f6875782edd65ed2b03f0b9da67c"}
Nov 21 15:37:35 crc kubenswrapper[4967]: W1121 15:37:35.841414 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b7bdc2e_d91d_420e_b70b_445ca8c31faf.slice/crio-7c7705c1c472843c05f57778bc7c08ffa6886a291a2853aecd23d2eedc0eb4d2 WatchSource:0}: Error finding container 7c7705c1c472843c05f57778bc7c08ffa6886a291a2853aecd23d2eedc0eb4d2: Status 404 returned error can't find the container with id 7c7705c1c472843c05f57778bc7c08ffa6886a291a2853aecd23d2eedc0eb4d2
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.841876 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c5fls" event={"ID":"5253a6fb-0e1a-4441-9a5a-24682a098f45","Type":"ContainerStarted","Data":"7ae7797f1d31cf37b23b8fd70fb6f507f4b3ca7082ab6efd0506404f39ec0aa2"}
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.843237 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-szs8c" event={"ID":"c191759b-e8ae-49d4-b2a1-b7acb2a54709","Type":"ContainerStarted","Data":"992d032906258afce29dce545df0ea42f6a27d0910893851a4c4c71f57b05eef"}
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.844737 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr"]
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.849167 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sxsl5" event={"ID":"a515cddd-29b9-4ced-afe4-cb52ead0fa58","Type":"ContainerStarted","Data":"7056c92e5749c20e4a0001b34f7e972242847c4c15f66e825d31ebd6f70e12de"}
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.859013 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-x2cf9"]
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.860639 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-l6k28" event={"ID":"15dd7d8c-fd82-4a6b-a749-8182841d9db9","Type":"ContainerStarted","Data":"afeff53bc33c4f15cef6af942585c93834c125a32ff9345ed07e024d30bf347d"}
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.867503 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 21 15:37:35 crc kubenswrapper[4967]: E1121 15:37:35.867662 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:36.367637566 +0000 UTC m=+144.626158574 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.867949 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.869303 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-6g4w8" event={"ID":"569fa728-8c24-4618-9655-612ea2297aad","Type":"ContainerStarted","Data":"b97d66b09b11acd8b16e63e730643fcbf6d8c5f750fecf81a4828e4bf2b7ea71"}
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.870885 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl" event={"ID":"d72fe727-d902-4315-afb6-8a67d9df8c57","Type":"ContainerStarted","Data":"8b189f9113dd2ab0c762a16c97bcd13e6d9388fea6168fa1d513072d6b07167b"}
Nov 21 15:37:35 crc kubenswrapper[4967]: E1121 15:37:35.872362 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:36.372299092 +0000 UTC m=+144.630820100 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.872854 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5" event={"ID":"6d1972c2-1c3e-4e33-b70d-52fe89843453","Type":"ContainerStarted","Data":"f6a19b2c6bc871c1ad9544b38470eb1e6272fe018361f50d8fa04de04e875d10"}
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.874260 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8r5qd" event={"ID":"66e9814a-2c50-4b11-9412-39e1fd445bc6","Type":"ContainerStarted","Data":"18f779720cea9eac07999bf0dd0eeda1f47fc80c2ba88ce5b40aa6f6c136c5c0"}
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.879235 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd" event={"ID":"659073fe-e665-4953-98b7-fe8e6ac5e075","Type":"ContainerStarted","Data":"eb8682f4ccec6716d91b59ba095ea8be60e1e4b98de6aca5ff40611651768754"}
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.890045 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-j66mb" event={"ID":"b2ae3907-0438-4add-bdcf-0045ae419f0a","Type":"ContainerStarted","Data":"32ade97269869892ae09d6278825ba3aac93a08f364caf9a6723eb082d277d44"}
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.898098 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-q2z9m" event={"ID":"228df940-1987-44bb-a72e-944bafc22d91","Type":"ContainerStarted","Data":"65f4f424918849707acccbe920b3abede09df1e08ba06ef9ad8091d00de6116e"}
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.898596 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-sxsl5" podStartSLOduration=119.898575157 podStartE2EDuration="1m59.898575157s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:35.898389392 +0000 UTC m=+144.156910400" watchObservedRunningTime="2025-11-21 15:37:35.898575157 +0000 UTC m=+144.157096165"
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.920275 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tz287" event={"ID":"5c2198a6-561a-407b-979b-67d05acfb234","Type":"ContainerStarted","Data":"07faf9b4204c344c0b704a4ac986c7a800762322776896c1c2e4d4bade2377f3"}
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.930069 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" event={"ID":"396d79a1-4427-49b2-b16e-89fb27df71ec","Type":"ContainerStarted","Data":"e18033091d7908c99e475e3781d20a778b6e792966cbf1725262f770005e3fd2"}
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.931374 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd"
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.935607 4967 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-lb8zd container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.16:6443/healthz\": dial tcp 10.217.0.16:6443: connect: connection refused" start-of-body=
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.935732 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" podUID="396d79a1-4427-49b2-b16e-89fb27df71ec" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.16:6443/healthz\": dial tcp 10.217.0.16:6443: connect: connection refused"
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.969671 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-zl28t" event={"ID":"ad8312ab-9169-4277-99d0-2525382013b7","Type":"ContainerStarted","Data":"207912c8b4c67b640d9f36b56458830b4f1daeeda86375b8ee1607e65e1d5613"}
Nov 21 15:37:35 crc kubenswrapper[4967]: I1121 15:37:35.971051 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 21 15:37:36 crc kubenswrapper[4967]: E1121 15:37:35.972557 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:36.472535222 +0000 UTC m=+144.731056230 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.047031 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" podStartSLOduration=120.047009311 podStartE2EDuration="2m0.047009311s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:35.970457881 +0000 UTC m=+144.228978899" watchObservedRunningTime="2025-11-21 15:37:36.047009311 +0000 UTC m=+144.305530319" Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.048669 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-xdn6j" event={"ID":"67563d08-de10-4aac-bfdd-248379b85548","Type":"ContainerStarted","Data":"f21286e59e57d13559f2432ac68e05e381f18765596824ca9e043ef8d7f420dd"} Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.064342 4967 patch_prober.go:28] interesting pod/downloads-7954f5f757-tdgx7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.064412 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-tdgx7" podUID="4c12eeb4-d087-4e18-a9b0-0a2211a6128d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.063711 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj" event={"ID":"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7","Type":"ContainerStarted","Data":"0fc8b031a32707f7449225fd212f5bda1f81f1735172c032002820f472426b9a"} Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.073085 4967 patch_prober.go:28] interesting pod/console-operator-58897d9998-x7hlr container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.073150 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-x7hlr" podUID="e1b88a76-dfa5-4273-a7ab-6c0824308b04" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.073897 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: 
\"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:36 crc kubenswrapper[4967]: E1121 15:37:36.077495 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:36.577472038 +0000 UTC m=+144.835993046 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.077909 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-k6mnj" Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.177192 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:36 crc kubenswrapper[4967]: E1121 15:37:36.178614 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:36.678572253 +0000 UTC m=+144.937093261 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.279679 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:36 crc kubenswrapper[4967]: E1121 15:37:36.280261 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:36.780241194 +0000 UTC m=+145.038762202 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.381138 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:36 crc kubenswrapper[4967]: E1121 15:37:36.381451 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:36.88140084 +0000 UTC m=+145.139921848 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.381631 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:36 crc kubenswrapper[4967]: E1121 15:37:36.382043 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:36.882035328 +0000 UTC m=+145.140556336 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.482994 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:36 crc kubenswrapper[4967]: E1121 15:37:36.483539 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:36.983486703 +0000 UTC m=+145.242007721 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.495528 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.585455 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:36 crc kubenswrapper[4967]: E1121 15:37:36.585860 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:37.085838134 +0000 UTC m=+145.344359142 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.687450 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:36 crc kubenswrapper[4967]: E1121 15:37:36.687914 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:37.187894427 +0000 UTC m=+145.446415435 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.788729 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:36 crc kubenswrapper[4967]: E1121 15:37:36.789706 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:37.289688302 +0000 UTC m=+145.548209310 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.890866 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:36 crc kubenswrapper[4967]: E1121 15:37:36.892209 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:37.392173837 +0000 UTC m=+145.650694845 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:36 crc kubenswrapper[4967]: I1121 15:37:36.993264 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:36 crc kubenswrapper[4967]: E1121 15:37:36.993679 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:37.493659313 +0000 UTC m=+145.752180321 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.091944 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-dgdjr" event={"ID":"3b7bdc2e-d91d-420e-b70b-445ca8c31faf","Type":"ContainerStarted","Data":"90300f3d6c4cc6d431166ff830027ecf1ecda5bed0fbdf2717933c35c457e316"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.092476 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-dgdjr" event={"ID":"3b7bdc2e-d91d-420e-b70b-445ca8c31faf","Type":"ContainerStarted","Data":"7c7705c1c472843c05f57778bc7c08ffa6886a291a2853aecd23d2eedc0eb4d2"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.095004 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:37 crc kubenswrapper[4967]: E1121 15:37:37.095445 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:37.595430437 +0000 UTC m=+145.853951445 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.105523 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-l6k28" event={"ID":"15dd7d8c-fd82-4a6b-a749-8182841d9db9","Type":"ContainerStarted","Data":"833e629a79f08729499da36779a103883a58921392e2c9fa1c78a5cba97b8a35"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.117106 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd" event={"ID":"659073fe-e665-4953-98b7-fe8e6ac5e075","Type":"ContainerStarted","Data":"35591396d789b9d405e4b2fbb8a25ac723a8a011d620e797de5bc99e6fd05b30"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.117508 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.121505 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-dmk5z" event={"ID":"ef9d0064-2b4f-416e-8300-8bdf07e2bd61","Type":"ContainerStarted","Data":"63b30bed1fd34ea0675cdd0004575b73bbf01037e6346ad2bafdf332686923ed"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.127127 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" event={"ID":"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2","Type":"ContainerStarted","Data":"4fd838d8c56fa70119eb84b5a4c93450579d81528215ab51c1359ca71226fb7c"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.128786 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" event={"ID":"52d4b2f2-f02f-4f5a-ba4c-c2d60e827e4e","Type":"ContainerStarted","Data":"deede57c0d92c00c7bd93e8d9c853ebdaa1e138e691127fa058d9cf2efd13aac"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.129353 4967 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-2bjhd container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:5443/healthz\": dial tcp 10.217.0.34:5443: connect: connection refused" start-of-body= Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.129421 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd" podUID="659073fe-e665-4953-98b7-fe8e6ac5e075" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.34:5443/healthz\": dial tcp 10.217.0.34:5443: connect: connection refused" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.148219 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-szs8c" event={"ID":"c191759b-e8ae-49d4-b2a1-b7acb2a54709","Type":"ContainerStarted","Data":"7941b1a24f0981d8f6a5b0ed0be9d7bab4b0e3629025bc180c404a9893e490eb"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.152083 4967 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openshift-ingress-canary/ingress-canary-dgdjr" podStartSLOduration=7.152057786 podStartE2EDuration="7.152057786s" podCreationTimestamp="2025-11-21 15:37:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:37.118993093 +0000 UTC m=+145.377514101" watchObservedRunningTime="2025-11-21 15:37:37.152057786 +0000 UTC m=+145.410578794" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.157193 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-l6k28" podStartSLOduration=121.157169085 podStartE2EDuration="2m1.157169085s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:37.151060567 +0000 UTC m=+145.409581575" watchObservedRunningTime="2025-11-21 15:37:37.157169085 +0000 UTC m=+145.415690093" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.163671 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-q2z9m" event={"ID":"228df940-1987-44bb-a72e-944bafc22d91","Type":"ContainerStarted","Data":"8761e16593e586ee5ee6dcde7f7021d4563b174b7e9f6c504efb8d8dfee96a27"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.196521 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:37 crc kubenswrapper[4967]: E1121 15:37:37.201352 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:37.701332771 +0000 UTC m=+145.959853979 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.211398 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c5fls" event={"ID":"5253a6fb-0e1a-4441-9a5a-24682a098f45","Type":"ContainerStarted","Data":"f2fd7cb5a05472c50245504afdc5583280c6529d5999645599935d785a385515"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.225511 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" event={"ID":"944c0231-2382-4fc2-9e88-e83b473045f8","Type":"ContainerStarted","Data":"caa9e7f4e946e5c34b7ab53afcf323243f9e3b68decd467e08b86ac6fde2a22e"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.228792 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd" podStartSLOduration=121.228771711 podStartE2EDuration="2m1.228771711s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:37.198747506 +0000 UTC m=+145.457268544" watchObservedRunningTime="2025-11-21 15:37:37.228771711 +0000 UTC m=+145.487292739" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.230878 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-szs8c" podStartSLOduration=121.230867702 podStartE2EDuration="2m1.230867702s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:37.227936216 +0000 UTC m=+145.486457224" watchObservedRunningTime="2025-11-21 15:37:37.230867702 +0000 UTC m=+145.489388710" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.255171 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-q2z9m" podStartSLOduration=121.255154789 podStartE2EDuration="2m1.255154789s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:37.254599523 +0000 UTC m=+145.513120531" watchObservedRunningTime="2025-11-21 15:37:37.255154789 +0000 UTC m=+145.513675797" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.260730 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5" event={"ID":"6d1972c2-1c3e-4e33-b70d-52fe89843453","Type":"ContainerStarted","Data":"237adddc7731275843a0bd9988228bdf298c70e1c034083c76c2f0fc679304e8"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.298774 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-mv55f" podStartSLOduration=121.298747299 
podStartE2EDuration="2m1.298747299s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:37.293740633 +0000 UTC m=+145.552261641" watchObservedRunningTime="2025-11-21 15:37:37.298747299 +0000 UTC m=+145.557268307" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.299503 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-hlspg" event={"ID":"dc8caa89-0bb5-4c35-9bbe-f1212a715f59","Type":"ContainerStarted","Data":"3922c152206525dc41e048e8a3d3cec9e0a0f50f992cde79eb0cad275fd6447c"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.300017 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:37 crc kubenswrapper[4967]: E1121 15:37:37.301780 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:37.801757046 +0000 UTC m=+146.060278044 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.326551 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.337915 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-gwx9x" event={"ID":"d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d","Type":"ContainerStarted","Data":"d02d9f54a02d2a7b43d21aa78641a3226c188df1025fed114b173a871ada1943"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.345259 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-gwx9x" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.353906 4967 patch_prober.go:28] interesting pod/router-default-5444994796-gwx9x container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.354144 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gwx9x" podUID="d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.354657 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-xdn6j" 
event={"ID":"67563d08-de10-4aac-bfdd-248379b85548","Type":"ContainerStarted","Data":"9ddec7b5b28d930baad962f48e8d6743e83253bc84364cdf69a5aad643afd3c4"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.377877 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w" event={"ID":"3874fcd1-fa6e-4b2c-b9e3-ce42c0275521","Type":"ContainerStarted","Data":"b0896f40872213970c38f3987002ef6d6bb06c0dacda3f706c473d6c059ff19b"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.386976 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" podStartSLOduration=121.386937037 podStartE2EDuration="2m1.386937037s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:37.371523689 +0000 UTC m=+145.630044717" watchObservedRunningTime="2025-11-21 15:37:37.386937037 +0000 UTC m=+145.645458055" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.401202 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tz287" event={"ID":"5c2198a6-561a-407b-979b-67d05acfb234","Type":"ContainerStarted","Data":"21c0f1f5bd1d9eb06c4c18c340c925176d2f00589ae77c952642512e28633a68"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.402599 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:37 crc kubenswrapper[4967]: E1121 15:37:37.405949 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:37.905931431 +0000 UTC m=+146.164452439 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.414997 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-c5fls" podStartSLOduration=121.414973584 podStartE2EDuration="2m1.414973584s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:37.410207565 +0000 UTC m=+145.668728593" watchObservedRunningTime="2025-11-21 15:37:37.414973584 +0000 UTC m=+145.673494592" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.453076 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-xdn6j" podStartSLOduration=121.453055773 podStartE2EDuration="2m1.453055773s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:37.452819836 +0000 UTC m=+145.711340854" watchObservedRunningTime="2025-11-21 15:37:37.453055773 +0000 UTC m=+145.711576781" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.472487 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-882pz" event={"ID":"70fb4095-863d-445d-bc3a-bdb264c4abc1","Type":"ContainerStarted","Data":"13ac71d005a5959956cd1b8c7dc057190a27a5622188ab58ac934c7883730088"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.474379 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl" event={"ID":"d72fe727-d902-4315-afb6-8a67d9df8c57","Type":"ContainerStarted","Data":"793bdbf868b261239c03a538844ba6cd357dd1407fc1a0eb38f06c10d3fef5a0"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.491231 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.491679 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.492746 4967 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-nkdl7 container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.8:8443/livez\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.492813 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7" podUID="944c0231-2382-4fc2-9e88-e83b473045f8" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.8:8443/livez\": dial tcp 10.217.0.8:8443: connect: connection refused" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 
15:37:37.493300 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8r5qd" event={"ID":"66e9814a-2c50-4b11-9412-39e1fd445bc6","Type":"ContainerStarted","Data":"e5fb0c62bd4939fa55f09609f79d678d02c5b0ab7f7441819c90dd055ebf18a4"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.503763 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth" event={"ID":"67ec7435-1c30-438c-8da5-8231ab6cf336","Type":"ContainerStarted","Data":"f1bd3148c69f34ef0bd8060b699b23a49ffa3f51f1e4b5c8d35b3f5de5a8c3ff"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.505106 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.520959 4967 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-2bzth container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.42:8443/healthz\": dial tcp 10.217.0.42:8443: connect: connection refused" start-of-body= Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.521029 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth" podUID="67ec7435-1c30-438c-8da5-8231ab6cf336" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.42:8443/healthz\": dial tcp 10.217.0.42:8443: connect: connection refused" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.528166 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr" event={"ID":"54732f6f-2547-48d2-bb98-2e90611b48bf","Type":"ContainerStarted","Data":"c3af8183e81f1b775bf218396af32041077e8485cffe1e98522c27667d01b131"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.556132 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:37 crc kubenswrapper[4967]: E1121 15:37:37.557326 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:38.057293439 +0000 UTC m=+146.315814447 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.594274 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gljzt" event={"ID":"a02f9e5c-5484-4406-89c3-f7803420a47e","Type":"ContainerStarted","Data":"8e18c75fa5d750b43eddedd3d99a3cf4806c4de96927196f8415e169775ec040"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.597114 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-tz287" podStartSLOduration=121.597082008 podStartE2EDuration="2m1.597082008s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:37.594523734 +0000 UTC m=+145.853044742" watchObservedRunningTime="2025-11-21 15:37:37.597082008 +0000 UTC m=+145.855603016" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.619580 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj" event={"ID":"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7","Type":"ContainerStarted","Data":"21d74f30a81d3a5b02d4b2f4279acd97030b4cdc71e86cd7f3717a1601757c53"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.620663 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-gwx9x" podStartSLOduration=121.620638925 podStartE2EDuration="2m1.620638925s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:37.619963705 +0000 UTC m=+145.878484733" watchObservedRunningTime="2025-11-21 15:37:37.620638925 +0000 UTC m=+145.879159943" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.621092 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.644544 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-g9gvx" event={"ID":"a8970b78-e931-4617-9b58-6845c95c48f4","Type":"ContainerStarted","Data":"46d8dc510b733f55ce8f83cdcdff35a499398d3f0e0f87d21c5bf268c95efbaf"} Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.663194 4967 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-lb8zd container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.16:6443/healthz\": dial tcp 10.217.0.16:6443: connect: connection refused" start-of-body= Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.663264 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" podUID="396d79a1-4427-49b2-b16e-89fb27df71ec" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.16:6443/healthz\": dial tcp 
10.217.0.16:6443: connect: connection refused" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.664540 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:37 crc kubenswrapper[4967]: E1121 15:37:37.664833 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:38.164823211 +0000 UTC m=+146.423344219 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.665195 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-hlspg" podStartSLOduration=120.665173612 podStartE2EDuration="2m0.665173612s" podCreationTimestamp="2025-11-21 15:35:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:37.663122792 +0000 UTC m=+145.921643800" watchObservedRunningTime="2025-11-21 15:37:37.665173612 +0000 UTC m=+145.923694620" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.682700 4967 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-ql9tj container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" start-of-body= Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.682942 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj" podUID="4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.705133 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-gljzt" podStartSLOduration=121.705109225 podStartE2EDuration="2m1.705109225s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:37.702081317 +0000 UTC m=+145.960602335" watchObservedRunningTime="2025-11-21 15:37:37.705109225 +0000 UTC m=+145.963630243" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.753083 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj" podStartSLOduration=121.753065772 podStartE2EDuration="2m1.753065772s" 
podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:37.75093614 +0000 UTC m=+146.009457148" watchObservedRunningTime="2025-11-21 15:37:37.753065772 +0000 UTC m=+146.011586780" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.767843 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:37 crc kubenswrapper[4967]: E1121 15:37:37.769964 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:38.269942783 +0000 UTC m=+146.528463791 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.781834 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl" podStartSLOduration=121.781813479 podStartE2EDuration="2m1.781813479s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:37.773740214 +0000 UTC m=+146.032261222" watchObservedRunningTime="2025-11-21 15:37:37.781813479 +0000 UTC m=+146.040334487" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.847338 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-g9gvx" podStartSLOduration=7.847298436 podStartE2EDuration="7.847298436s" podCreationTimestamp="2025-11-21 15:37:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:37.84571969 +0000 UTC m=+146.104240728" watchObservedRunningTime="2025-11-21 15:37:37.847298436 +0000 UTC m=+146.105819444" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.847638 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth" podStartSLOduration=121.847633696 podStartE2EDuration="2m1.847633696s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:37.812116502 +0000 UTC m=+146.070637520" watchObservedRunningTime="2025-11-21 15:37:37.847633696 +0000 UTC m=+146.106154704" Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.877087 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" 
(UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:37 crc kubenswrapper[4967]: E1121 15:37:37.877618 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:38.377595549 +0000 UTC m=+146.636116557 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.981226 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:37 crc kubenswrapper[4967]: E1121 15:37:37.981494 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:38.481448174 +0000 UTC m=+146.739969182 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:37 crc kubenswrapper[4967]: I1121 15:37:37.981719 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:37 crc kubenswrapper[4967]: E1121 15:37:37.982138 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:38.482122023 +0000 UTC m=+146.740643031 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.083885 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:38 crc kubenswrapper[4967]: E1121 15:37:38.084166 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:38.584118634 +0000 UTC m=+146.842639642 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.084254 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:38 crc kubenswrapper[4967]: E1121 15:37:38.084797 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:38.584788144 +0000 UTC m=+146.843309352 (durationBeforeRetry 500ms). 
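
Both failure modes in the records above, UnmountVolume.TearDown for the terminating pod 8f668bae-612b-4b75-9490-919e737c6a3b and MountVolume.MountDevice for the incoming image-registry-697d97f7c8-kls8w pod, reduce to the same root cause: the kubelet cannot construct a CSI client because kubevirt.io.hostpath-provisioner is not yet in its table of registered drivers. A minimal sketch of that lookup pattern, assuming a simplified in-memory registry (csiDriverRegistry and newClient are illustrative names, not the kubelet's actual types):

    package main

    import (
        "fmt"
        "sync"
    )

    // csiDriverRegistry is a simplified stand-in for the kubelet's table of
    // CSI drivers, keyed by driver name and filled in at plugin registration.
    type csiDriverRegistry struct {
        mu      sync.RWMutex
        drivers map[string]string // driver name -> endpoint (unix socket path)
    }

    // newClient fails the same way the log does when the driver has not
    // registered yet: there is simply no entry to dial.
    func (r *csiDriverRegistry) newClient(driverName string) (string, error) {
        r.mu.RLock()
        defer r.mu.RUnlock()
        endpoint, ok := r.drivers[driverName]
        if !ok {
            return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driverName)
        }
        return endpoint, nil
    }

    func main() {
        reg := &csiDriverRegistry{drivers: map[string]string{}}
        if _, err := reg.newClient("kubevirt.io.hostpath-provisioner"); err != nil {
            fmt.Println(err) // mirrors the TearDown/MountDevice errors above
        }
    }

Both TearDown and MountDevice hit this lookup before doing any work, which is why two otherwise unrelated operations (tearing down the old pod's volume, staging the same PV for its replacement) fail with the identical message.
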
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.189212 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:38 crc kubenswrapper[4967]: E1121 15:37:38.190187 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:38.690163063 +0000 UTC m=+146.948684071 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.291166 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:38 crc kubenswrapper[4967]: E1121 15:37:38.291660 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:38.791638299 +0000 UTC m=+147.050159307 (durationBeforeRetry 500ms). 
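
When an operation fails, nestedpendingoperations does not retry inline: it records a "No retries permitted until" deadline, and the reconciler's later sync passes (the repeated "operationExecutor... started" records) are refused until that deadline passes. In these records the delay is a constant 500ms per attempt. A small sketch of that requeue-with-deadline shape, assuming a fixed delay for simplicity (pendingOp is an illustrative type, not the kubelet's):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // pendingOp tracks when a failed volume operation may run again.
    type pendingOp struct {
        notBefore time.Time
    }

    const durationBeforeRetry = 500 * time.Millisecond

    // run refuses to start before the deadline, and on failure pushes the
    // deadline out by the retry delay, as in the records above.
    func (p *pendingOp) run(now time.Time, op func() error) error {
        if now.Before(p.notBefore) {
            return fmt.Errorf("no retries permitted until %s", p.notBefore.Format(time.RFC3339Nano))
        }
        if err := op(); err != nil {
            p.notBefore = now.Add(durationBeforeRetry)
            return err
        }
        return nil
    }

    func main() {
        op := &pendingOp{}
        fail := func() error { return errors.New("driver not registered") }
        now := time.Now()
        fmt.Println(op.run(now, fail))                            // fails, sets deadline
        fmt.Println(op.run(now.Add(100*time.Millisecond), fail))  // refused: too early
    }
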
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.364097 4967 patch_prober.go:28] interesting pod/router-default-5444994796-gwx9x container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 15:37:38 crc kubenswrapper[4967]: [-]has-synced failed: reason withheld Nov 21 15:37:38 crc kubenswrapper[4967]: [+]process-running ok Nov 21 15:37:38 crc kubenswrapper[4967]: healthz check failed Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.364221 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gwx9x" podUID="d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.393097 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:38 crc kubenswrapper[4967]: E1121 15:37:38.393357 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:38.893292929 +0000 UTC m=+147.151813947 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.393419 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:38 crc kubenswrapper[4967]: E1121 15:37:38.393858 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:38.893847566 +0000 UTC m=+147.152368654 (durationBeforeRetry 500ms). 
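
The router's startup probe output above is the standard healthz format: each sub-check prints as "[+]name ok" or "[-]name failed", and any failing check ([-]backend-http, [-]has-synced here) makes the endpoint return HTTP 500, which the kubelet then records as a failed Startup probe. A compact sketch of that aggregation, with the check names copied from the log (the handler itself is illustrative, not the router's code):

    package main

    import (
        "fmt"
        "net/http"
    )

    // healthz runs named sub-checks in a fixed order; any failure makes the
    // endpoint return HTTP 500, which the kubelet records as a failed probe.
    func healthz(names []string, checks map[string]func() error) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            body, failed := "", false
            for _, name := range names {
                if err := checks[name](); err != nil {
                    body += "[-]" + name + " failed: reason withheld\n"
                    failed = true
                } else {
                    body += "[+]" + name + " ok\n"
                }
            }
            if failed {
                body += "healthz check failed\n"
                w.WriteHeader(http.StatusInternalServerError)
            }
            fmt.Fprint(w, body)
        }
    }

    func main() {
        checks := map[string]func() error{
            "backend-http":    func() error { return fmt.Errorf("not synced") },
            "has-synced":      func() error { return fmt.Errorf("not synced") },
            "process-running": func() error { return nil },
        }
        http.HandleFunc("/healthz", healthz([]string{"backend-http", "has-synced", "process-running"}, checks))
        _ = http.ListenAndServe(":8080", nil) // sketch only; error handling elided
    }
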
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.495271 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:38 crc kubenswrapper[4967]: E1121 15:37:38.495579 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:38.995534487 +0000 UTC m=+147.254055505 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.495969 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:38 crc kubenswrapper[4967]: E1121 15:37:38.496462 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:38.996451974 +0000 UTC m=+147.254973172 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.597488 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:38 crc kubenswrapper[4967]: E1121 15:37:38.597752 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:39.097731984 +0000 UTC m=+147.356252992 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.650375 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr" event={"ID":"54732f6f-2547-48d2-bb98-2e90611b48bf","Type":"ContainerStarted","Data":"daa4adb553568ca330376f8fb8729ea84ad29733ac4ee89a9794893fcc176461"} Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.650454 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr" event={"ID":"54732f6f-2547-48d2-bb98-2e90611b48bf","Type":"ContainerStarted","Data":"34f5fae34f65fb67dab77f15cf4abefe6b9c2bb7e9c832ce947900abac2036fc"} Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.652458 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5" event={"ID":"6d1972c2-1c3e-4e33-b70d-52fe89843453","Type":"ContainerStarted","Data":"7ebf39cd6ec8f538128fb466b78ee45af3151e19ec3dbca2a9a0b0f5cafcbc41"} Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.655011 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8r5qd" event={"ID":"66e9814a-2c50-4b11-9412-39e1fd445bc6","Type":"ContainerStarted","Data":"69abce4377cef39c2b0bcdf3c238eab6c3802ef8274af5a2fe0a799057f5e618"} Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.655174 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8r5qd" Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.656519 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-zl28t" 
event={"ID":"ad8312ab-9169-4277-99d0-2525382013b7","Type":"ContainerStarted","Data":"b321f32f3cadfc2534b8314effd1efb5deeb4340b57a5d71bd57b06fa01886dd"} Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.658589 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-6g4w8" event={"ID":"569fa728-8c24-4618-9655-612ea2297aad","Type":"ContainerStarted","Data":"bcb6c3f07348c8b263fbd6f53eb94d8f40c88b8a5a5eceace30c2545919c424b"} Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.658628 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-6g4w8" event={"ID":"569fa728-8c24-4618-9655-612ea2297aad","Type":"ContainerStarted","Data":"bf258d392cfc11221f5bb51cee7b7aeaa9e9a7f8789a847036d3df30836b1829"} Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.660923 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-882pz" event={"ID":"70fb4095-863d-445d-bc3a-bdb264c4abc1","Type":"ContainerStarted","Data":"92db9edd6d05582c92c22ff48e5b81abfc9b8da4f73ccb4ede35682d4ba40964"} Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.663274 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-km26f" event={"ID":"28189f52-065e-4d6f-b959-6a052477f10f","Type":"ContainerStarted","Data":"ec8e19bd4fc9e40b3c8455d763c652e53f52b54cddad5e07a9cb5aebc69f1cbe"} Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.663359 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-km26f" event={"ID":"28189f52-065e-4d6f-b959-6a052477f10f","Type":"ContainerStarted","Data":"fcd292a204fbef6efadb680bad7119b7612f54b3bd0545ae6a0da5ef4b495cb6"} Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.665212 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-j66mb" event={"ID":"b2ae3907-0438-4add-bdcf-0045ae419f0a","Type":"ContainerStarted","Data":"c55d3ad24ef0bfe68b267178083e5fdef3621bab4d1c0d1ff16817970ad48793"} Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.665250 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-j66mb" event={"ID":"b2ae3907-0438-4add-bdcf-0045ae419f0a","Type":"ContainerStarted","Data":"7886e81a7a2aa97a2bd744bb0993a8fb0579704ac7d6201ea1a5b3dd4c8f9060"} Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.666911 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-dmk5z" event={"ID":"ef9d0064-2b4f-416e-8300-8bdf07e2bd61","Type":"ContainerStarted","Data":"ea692e5ab399bde1e00ad7be51a9eb25e257681c4472056290557aa98de582ce"} Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.666994 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-dmk5z" event={"ID":"ef9d0064-2b4f-416e-8300-8bdf07e2bd61","Type":"ContainerStarted","Data":"1c30097443233e2e66dcb8451caefd544f60fd8e49ad7c813805c5ccbbae5bed"} Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.668346 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-92chf" event={"ID":"22aa627b-e824-4f54-8ee9-e9db1e7b7da3","Type":"ContainerStarted","Data":"21f6c46b99139d1f22978d5871993d6d6527b0373955064b6d56bbbcb764bcce"} Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.670434 4967 generic.go:334] 
"Generic (PLEG): container finished" podID="3874fcd1-fa6e-4b2c-b9e3-ce42c0275521" containerID="b0896f40872213970c38f3987002ef6d6bb06c0dacda3f706c473d6c059ff19b" exitCode=0 Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.670655 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w" event={"ID":"3874fcd1-fa6e-4b2c-b9e3-ce42c0275521","Type":"ContainerDied","Data":"b0896f40872213970c38f3987002ef6d6bb06c0dacda3f706c473d6c059ff19b"} Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.670723 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w" event={"ID":"3874fcd1-fa6e-4b2c-b9e3-ce42c0275521","Type":"ContainerStarted","Data":"a1073f8842df24367e93ec0eacfed043fd2332798cd249fe3529495088906269"} Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.672596 4967 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-ql9tj container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" start-of-body= Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.672668 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj" podUID="4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.673876 4967 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-2bzth container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.42:8443/healthz\": dial tcp 10.217.0.42:8443: connect: connection refused" start-of-body= Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.673966 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth" podUID="67ec7435-1c30-438c-8da5-8231ab6cf336" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.42:8443/healthz\": dial tcp 10.217.0.42:8443: connect: connection refused" Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.695205 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-t97cr" podStartSLOduration=122.695183343 podStartE2EDuration="2m2.695183343s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:38.694169523 +0000 UTC m=+146.952690541" watchObservedRunningTime="2025-11-21 15:37:38.695183343 +0000 UTC m=+146.953704361" Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.700876 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:38 crc kubenswrapper[4967]: E1121 15:37:38.701299 4967 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:39.20127101 +0000 UTC m=+147.459792018 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.761170 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-92chf" podStartSLOduration=122.761145804 podStartE2EDuration="2m2.761145804s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:38.759919118 +0000 UTC m=+147.018440126" watchObservedRunningTime="2025-11-21 15:37:38.761145804 +0000 UTC m=+147.019666812" Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.802525 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:38 crc kubenswrapper[4967]: E1121 15:37:38.810132 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:39.31010119 +0000 UTC m=+147.568622348 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.827471 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w" Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.877816 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-6g4w8" podStartSLOduration=122.877777411 podStartE2EDuration="2m2.877777411s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:38.877544854 +0000 UTC m=+147.136065862" watchObservedRunningTime="2025-11-21 15:37:38.877777411 +0000 UTC m=+147.136298409" Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.880125 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w" podStartSLOduration=122.880116169 podStartE2EDuration="2m2.880116169s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:38.832699628 +0000 UTC m=+147.091220636" watchObservedRunningTime="2025-11-21 15:37:38.880116169 +0000 UTC m=+147.138637177" Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.905822 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:38 crc kubenswrapper[4967]: E1121 15:37:38.906546 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:39.406515388 +0000 UTC m=+147.665036586 (durationBeforeRetry 500ms). 
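
The pod_startup_latency_tracker records compute startup latency as observedRunningTime minus podCreationTimestamp, and when firstStartedPulling/lastFinishedPulling are the zero time (0001-01-01), no image pull contributed, so the SLO duration equals the end-to-end duration. For the multus-admission-controller record above: 15:37:38.877544854 minus 15:35:36 is about 2m2.8775s, agreeing with podStartSLOduration=122.877777411 to within a fraction of a millisecond (the tracker samples its own clock when it logs). A check of that arithmetic with the timestamps copied from the record:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Timestamps from the multus-admission-controller record above.
        created, _ := time.Parse(time.RFC3339, "2025-11-21T15:35:36Z")
        running, _ := time.Parse(time.RFC3339Nano, "2025-11-21T15:37:38.877544854Z")

        // firstStartedPulling is the zero time in the record (0001-01-01),
        // i.e. no image pull happened during startup.
        var firstStartedPulling time.Time

        e2e := running.Sub(created)
        if firstStartedPulling.IsZero() {
            // No pull: SLO duration and end-to-end duration coincide.
            fmt.Printf("podStartSLOduration=%.9f podStartE2EDuration=%s\n", e2e.Seconds(), e2e)
        }
    }
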
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.935038 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-w46k5" podStartSLOduration=122.935009058 podStartE2EDuration="2m2.935009058s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:38.932523576 +0000 UTC m=+147.191044584" watchObservedRunningTime="2025-11-21 15:37:38.935009058 +0000 UTC m=+147.193530066" Nov 21 15:37:38 crc kubenswrapper[4967]: I1121 15:37:38.966993 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-dmk5z" podStartSLOduration=8.966966269 podStartE2EDuration="8.966966269s" podCreationTimestamp="2025-11-21 15:37:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:38.964441565 +0000 UTC m=+147.222962583" watchObservedRunningTime="2025-11-21 15:37:38.966966269 +0000 UTC m=+147.225487277" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.006805 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:39 crc kubenswrapper[4967]: E1121 15:37:39.007155 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:39.507138759 +0000 UTC m=+147.765659767 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.045687 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-zl28t" podStartSLOduration=122.045651881 podStartE2EDuration="2m2.045651881s" podCreationTimestamp="2025-11-21 15:35:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:39.042642673 +0000 UTC m=+147.301163691" watchObservedRunningTime="2025-11-21 15:37:39.045651881 +0000 UTC m=+147.304172899" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.096281 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-km26f" podStartSLOduration=123.096265685 podStartE2EDuration="2m3.096265685s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:39.093078122 +0000 UTC m=+147.351599130" watchObservedRunningTime="2025-11-21 15:37:39.096265685 +0000 UTC m=+147.354786703" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.109120 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:39 crc kubenswrapper[4967]: E1121 15:37:39.109649 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:39.609631664 +0000 UTC m=+147.868152672 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.134621 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8r5qd" podStartSLOduration=123.134602741 podStartE2EDuration="2m3.134602741s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:39.13144867 +0000 UTC m=+147.389969678" watchObservedRunningTime="2025-11-21 15:37:39.134602741 +0000 UTC m=+147.393123739" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.163173 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-882pz" podStartSLOduration=123.163152983 podStartE2EDuration="2m3.163152983s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:39.162606377 +0000 UTC m=+147.421127405" watchObservedRunningTime="2025-11-21 15:37:39.163152983 +0000 UTC m=+147.421673981" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.188827 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-j66mb" podStartSLOduration=123.18880347 podStartE2EDuration="2m3.18880347s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:39.186967037 +0000 UTC m=+147.445488045" watchObservedRunningTime="2025-11-21 15:37:39.18880347 +0000 UTC m=+147.447324488" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.210502 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:39 crc kubenswrapper[4967]: E1121 15:37:39.210932 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:39.710899594 +0000 UTC m=+147.969420612 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.211132 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:39 crc kubenswrapper[4967]: E1121 15:37:39.211545 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:39.711528562 +0000 UTC m=+147.970049570 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.312882 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:39 crc kubenswrapper[4967]: E1121 15:37:39.313128 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:39.81308898 +0000 UTC m=+148.071609988 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.313249 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:39 crc kubenswrapper[4967]: E1121 15:37:39.313700 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:39.813683507 +0000 UTC m=+148.072204515 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.348112 4967 patch_prober.go:28] interesting pod/router-default-5444994796-gwx9x container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 15:37:39 crc kubenswrapper[4967]: [-]has-synced failed: reason withheld Nov 21 15:37:39 crc kubenswrapper[4967]: [+]process-running ok Nov 21 15:37:39 crc kubenswrapper[4967]: healthz check failed Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.348208 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gwx9x" podUID="d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.414696 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:39 crc kubenswrapper[4967]: E1121 15:37:39.414987 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:39.914942627 +0000 UTC m=+148.173463635 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.415446 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:39 crc kubenswrapper[4967]: E1121 15:37:39.415857 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:39.915849013 +0000 UTC m=+148.174370021 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.503502 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-dmk5z" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.517444 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:39 crc kubenswrapper[4967]: E1121 15:37:39.517652 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:40.017614857 +0000 UTC m=+148.276135865 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.517725 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.517844 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.518171 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:39 crc kubenswrapper[4967]: E1121 15:37:39.518640 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:40.018621997 +0000 UTC m=+148.277143005 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.519027 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.527805 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.619791 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:39 crc kubenswrapper[4967]: E1121 15:37:39.620209 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:40.120169554 +0000 UTC m=+148.378690562 (durationBeforeRetry 500ms). 
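
Note the contrast in the records above: the configmap volume (nginx-conf) and secret volume (networking-console-plugin-cert) report "MountVolume.SetUp succeeded" immediately, because their plugins are compiled into the kubelet, while the CSI-backed PVC keeps failing until its out-of-process driver registers. A rough sketch of that dispatch-by-volume-type idea (a simplification: the real plugin manager matches on volume specs, and the types below are illustrative):

    package main

    import "fmt"

    // mounter is the common behaviour every volume plugin provides.
    type mounter interface {
        SetUp() error
    }

    type configMapVolume struct{ name string } // in-tree: data comes from the API server
    type csiVolume struct {
        driver     string
        registered map[string]bool // stand-in for the CSI driver registry
    }

    func (v configMapVolume) SetUp() error { return nil } // no external process needed
    func (v csiVolume) SetUp() error {
        if !v.registered[v.driver] {
            return fmt.Errorf("driver name %s not found in the list of registered CSI drivers", v.driver)
        }
        return nil
    }

    func main() {
        vols := []mounter{
            configMapVolume{name: "nginx-conf"},
            csiVolume{driver: "kubevirt.io.hostpath-provisioner", registered: map[string]bool{}},
        }
        for _, v := range vols {
            fmt.Println(v.SetUp()) // <nil>, then the registry error
        }
    }
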
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.620337 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.620457 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.620507 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:37:39 crc kubenswrapper[4967]: E1121 15:37:39.620882 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:40.120859165 +0000 UTC m=+148.379380173 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.626141 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.626144 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.667716 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.672473 4967 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-2bjhd container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.34:5443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.672541 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd" podUID="659073fe-e665-4953-98b7-fe8e6ac5e075" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.34:5443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.694338 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" event={"ID":"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2","Type":"ContainerStarted","Data":"cd477b1a730c02922dd87aa31ef2c22d5f72eef7ee63e98bff12638dd17ef0c0"} Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.697977 4967 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-ql9tj container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" start-of-body= Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.698054 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj" podUID="4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.37:8080/healthz\": dial tcp 10.217.0.37:8080: connect: connection refused" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.713772 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-2bzth" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.723932 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:39 crc kubenswrapper[4967]: E1121 15:37:39.724460 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:40.224438361 +0000 UTC m=+148.482959369 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.753744 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.767062 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.828121 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:39 crc kubenswrapper[4967]: E1121 15:37:39.835231 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:40.335213718 +0000 UTC m=+148.593734716 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.930165 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:39 crc kubenswrapper[4967]: E1121 15:37:39.930306 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:40.430284986 +0000 UTC m=+148.688805994 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:39 crc kubenswrapper[4967]: I1121 15:37:39.930415 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:39 crc kubenswrapper[4967]: E1121 15:37:39.930755 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-21 15:37:40.43074789 +0000 UTC m=+148.689268898 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:40 crc kubenswrapper[4967]: I1121 15:37:40.031918 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:40 crc kubenswrapper[4967]: E1121 15:37:40.032446 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:40.532429391 +0000 UTC m=+148.790950399 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:40 crc kubenswrapper[4967]: I1121 15:37:40.138378 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:40 crc kubenswrapper[4967]: E1121 15:37:40.138877 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:40.638859511 +0000 UTC m=+148.897380519 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:40 crc kubenswrapper[4967]: I1121 15:37:40.239032 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:40 crc kubenswrapper[4967]: E1121 15:37:40.239163 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:40.739137552 +0000 UTC m=+148.997658560 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:40 crc kubenswrapper[4967]: I1121 15:37:40.239422 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:40 crc kubenswrapper[4967]: E1121 15:37:40.239915 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:40.739894344 +0000 UTC m=+148.998415352 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:40 crc kubenswrapper[4967]: I1121 15:37:40.341454 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:40 crc kubenswrapper[4967]: E1121 15:37:40.342185 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:40.842164143 +0000 UTC m=+149.100685151 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:40 crc kubenswrapper[4967]: I1121 15:37:40.351667 4967 patch_prober.go:28] interesting pod/router-default-5444994796-gwx9x container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 15:37:40 crc kubenswrapper[4967]: [-]has-synced failed: reason withheld Nov 21 15:37:40 crc kubenswrapper[4967]: [+]process-running ok Nov 21 15:37:40 crc kubenswrapper[4967]: healthz check failed Nov 21 15:37:40 crc kubenswrapper[4967]: I1121 15:37:40.351747 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gwx9x" podUID="d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 15:37:40 crc kubenswrapper[4967]: I1121 15:37:40.443763 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:40 crc kubenswrapper[4967]: E1121 15:37:40.444182 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:40.944169814 +0000 UTC m=+149.202690822 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:40 crc kubenswrapper[4967]: I1121 15:37:40.552877 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:40 crc kubenswrapper[4967]: E1121 15:37:40.553229 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:41.05321351 +0000 UTC m=+149.311734518 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:40 crc kubenswrapper[4967]: W1121 15:37:40.631486 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-4596823b2f30c6f570bec138abd1484579ab41e8455a476914b5e1070e03c644 WatchSource:0}: Error finding container 4596823b2f30c6f570bec138abd1484579ab41e8455a476914b5e1070e03c644: Status 404 returned error can't find the container with id 4596823b2f30c6f570bec138abd1484579ab41e8455a476914b5e1070e03c644 Nov 21 15:37:40 crc kubenswrapper[4967]: E1121 15:37:40.658008 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:41.157990992 +0000 UTC m=+149.416512000 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:40 crc kubenswrapper[4967]: I1121 15:37:40.654308 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:40 crc kubenswrapper[4967]: I1121 15:37:40.744552 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"4596823b2f30c6f570bec138abd1484579ab41e8455a476914b5e1070e03c644"} Nov 21 15:37:40 crc kubenswrapper[4967]: I1121 15:37:40.759166 4967 generic.go:334] "Generic (PLEG): container finished" podID="d72fe727-d902-4315-afb6-8a67d9df8c57" containerID="793bdbf868b261239c03a538844ba6cd357dd1407fc1a0eb38f06c10d3fef5a0" exitCode=0 Nov 21 15:37:40 crc kubenswrapper[4967]: I1121 15:37:40.759259 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl" event={"ID":"d72fe727-d902-4315-afb6-8a67d9df8c57","Type":"ContainerDied","Data":"793bdbf868b261239c03a538844ba6cd357dd1407fc1a0eb38f06c10d3fef5a0"} Nov 21 15:37:40 crc kubenswrapper[4967]: I1121 15:37:40.767874 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:40 crc kubenswrapper[4967]: E1121 15:37:40.768295 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:41.268278834 +0000 UTC m=+149.526799842 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:40 crc kubenswrapper[4967]: I1121 15:37:40.788722 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"e790761ba24bd2c6db178486e88990b394776ee8eb0f0a7f161e611163fc8a72"} Nov 21 15:37:40 crc kubenswrapper[4967]: I1121 15:37:40.872493 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:40 crc kubenswrapper[4967]: E1121 15:37:40.873785 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:41.373768717 +0000 UTC m=+149.632289725 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:40 crc kubenswrapper[4967]: I1121 15:37:40.887705 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9pz5w" Nov 21 15:37:40 crc kubenswrapper[4967]: I1121 15:37:40.973486 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:40 crc kubenswrapper[4967]: E1121 15:37:40.973806 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:41.473759759 +0000 UTC m=+149.732280777 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:40 crc kubenswrapper[4967]: I1121 15:37:40.974298 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:40 crc kubenswrapper[4967]: E1121 15:37:40.974795 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:41.474775689 +0000 UTC m=+149.733296697 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.016153 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7kkh6"] Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.017203 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7kkh6" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.021172 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.045532 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7kkh6"] Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.083381 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:41 crc kubenswrapper[4967]: E1121 15:37:41.083918 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:41.583897517 +0000 UTC m=+149.842418525 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.185160 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc581da3-1d2d-4d88-a2a8-6729abd4b955-utilities\") pod \"certified-operators-7kkh6\" (UID: \"dc581da3-1d2d-4d88-a2a8-6729abd4b955\") " pod="openshift-marketplace/certified-operators-7kkh6" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.185230 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.185268 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8vl5\" (UniqueName: \"kubernetes.io/projected/dc581da3-1d2d-4d88-a2a8-6729abd4b955-kube-api-access-f8vl5\") pod \"certified-operators-7kkh6\" (UID: \"dc581da3-1d2d-4d88-a2a8-6729abd4b955\") " pod="openshift-marketplace/certified-operators-7kkh6" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.185295 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc581da3-1d2d-4d88-a2a8-6729abd4b955-catalog-content\") pod \"certified-operators-7kkh6\" (UID: \"dc581da3-1d2d-4d88-a2a8-6729abd4b955\") " pod="openshift-marketplace/certified-operators-7kkh6" Nov 21 15:37:41 crc kubenswrapper[4967]: E1121 15:37:41.185663 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:41.685648041 +0000 UTC m=+149.944169049 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.213045 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-w7s76"] Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.214614 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-w7s76" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.218133 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.233229 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-w7s76"] Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.287190 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:41 crc kubenswrapper[4967]: E1121 15:37:41.287354 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:41.787333773 +0000 UTC m=+150.045854781 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.287407 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc581da3-1d2d-4d88-a2a8-6729abd4b955-utilities\") pod \"certified-operators-7kkh6\" (UID: \"dc581da3-1d2d-4d88-a2a8-6729abd4b955\") " pod="openshift-marketplace/certified-operators-7kkh6" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.287457 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.287490 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8vl5\" (UniqueName: \"kubernetes.io/projected/dc581da3-1d2d-4d88-a2a8-6729abd4b955-kube-api-access-f8vl5\") pod \"certified-operators-7kkh6\" (UID: \"dc581da3-1d2d-4d88-a2a8-6729abd4b955\") " pod="openshift-marketplace/certified-operators-7kkh6" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.287519 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc581da3-1d2d-4d88-a2a8-6729abd4b955-catalog-content\") pod \"certified-operators-7kkh6\" (UID: \"dc581da3-1d2d-4d88-a2a8-6729abd4b955\") " pod="openshift-marketplace/certified-operators-7kkh6" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.287911 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/dc581da3-1d2d-4d88-a2a8-6729abd4b955-utilities\") pod \"certified-operators-7kkh6\" (UID: \"dc581da3-1d2d-4d88-a2a8-6729abd4b955\") " pod="openshift-marketplace/certified-operators-7kkh6" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.287922 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc581da3-1d2d-4d88-a2a8-6729abd4b955-catalog-content\") pod \"certified-operators-7kkh6\" (UID: \"dc581da3-1d2d-4d88-a2a8-6729abd4b955\") " pod="openshift-marketplace/certified-operators-7kkh6" Nov 21 15:37:41 crc kubenswrapper[4967]: E1121 15:37:41.288425 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:41.788405614 +0000 UTC m=+150.046926622 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.312742 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8vl5\" (UniqueName: \"kubernetes.io/projected/dc581da3-1d2d-4d88-a2a8-6729abd4b955-kube-api-access-f8vl5\") pod \"certified-operators-7kkh6\" (UID: \"dc581da3-1d2d-4d88-a2a8-6729abd4b955\") " pod="openshift-marketplace/certified-operators-7kkh6" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.349064 4967 patch_prober.go:28] interesting pod/router-default-5444994796-gwx9x container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 15:37:41 crc kubenswrapper[4967]: [-]has-synced failed: reason withheld Nov 21 15:37:41 crc kubenswrapper[4967]: [+]process-running ok Nov 21 15:37:41 crc kubenswrapper[4967]: healthz check failed Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.349154 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gwx9x" podUID="d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.399408 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7kkh6" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.401119 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.408524 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h72rn\" (UniqueName: \"kubernetes.io/projected/eedebded-dcce-4646-837f-26b33ed68cfd-kube-api-access-h72rn\") pod \"community-operators-w7s76\" (UID: \"eedebded-dcce-4646-837f-26b33ed68cfd\") " pod="openshift-marketplace/community-operators-w7s76" Nov 21 15:37:41 crc kubenswrapper[4967]: E1121 15:37:41.409751 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:41.909715217 +0000 UTC m=+150.168236225 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.409906 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.409987 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eedebded-dcce-4646-837f-26b33ed68cfd-catalog-content\") pod \"community-operators-w7s76\" (UID: \"eedebded-dcce-4646-837f-26b33ed68cfd\") " pod="openshift-marketplace/community-operators-w7s76" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.410028 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eedebded-dcce-4646-837f-26b33ed68cfd-utilities\") pod \"community-operators-w7s76\" (UID: \"eedebded-dcce-4646-837f-26b33ed68cfd\") " pod="openshift-marketplace/community-operators-w7s76" Nov 21 15:37:41 crc kubenswrapper[4967]: E1121 15:37:41.410635 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:41.910599693 +0000 UTC m=+150.169120711 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.429920 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-m5b62"] Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.431881 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-m5b62" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.447464 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-m5b62"] Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.511466 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:41 crc kubenswrapper[4967]: E1121 15:37:41.511777 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:42.011740969 +0000 UTC m=+150.270261977 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.512280 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gt9c\" (UniqueName: \"kubernetes.io/projected/aba8d061-5e25-4f5b-84ae-03d940117fe4-kube-api-access-9gt9c\") pod \"certified-operators-m5b62\" (UID: \"aba8d061-5e25-4f5b-84ae-03d940117fe4\") " pod="openshift-marketplace/certified-operators-m5b62" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.512339 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aba8d061-5e25-4f5b-84ae-03d940117fe4-catalog-content\") pod \"certified-operators-m5b62\" (UID: \"aba8d061-5e25-4f5b-84ae-03d940117fe4\") " pod="openshift-marketplace/certified-operators-m5b62" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.512367 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h72rn\" (UniqueName: \"kubernetes.io/projected/eedebded-dcce-4646-837f-26b33ed68cfd-kube-api-access-h72rn\") pod \"community-operators-w7s76\" (UID: \"eedebded-dcce-4646-837f-26b33ed68cfd\") " pod="openshift-marketplace/community-operators-w7s76" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.512392 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aba8d061-5e25-4f5b-84ae-03d940117fe4-utilities\") pod \"certified-operators-m5b62\" (UID: \"aba8d061-5e25-4f5b-84ae-03d940117fe4\") " pod="openshift-marketplace/certified-operators-m5b62" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.512420 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:41 crc kubenswrapper[4967]: E1121 15:37:41.513383 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:42.013367936 +0000 UTC m=+150.271888934 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.512459 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eedebded-dcce-4646-837f-26b33ed68cfd-catalog-content\") pod \"community-operators-w7s76\" (UID: \"eedebded-dcce-4646-837f-26b33ed68cfd\") " pod="openshift-marketplace/community-operators-w7s76" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.513666 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eedebded-dcce-4646-837f-26b33ed68cfd-utilities\") pod \"community-operators-w7s76\" (UID: \"eedebded-dcce-4646-837f-26b33ed68cfd\") " pod="openshift-marketplace/community-operators-w7s76" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.514695 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eedebded-dcce-4646-837f-26b33ed68cfd-utilities\") pod \"community-operators-w7s76\" (UID: \"eedebded-dcce-4646-837f-26b33ed68cfd\") " pod="openshift-marketplace/community-operators-w7s76" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.515022 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eedebded-dcce-4646-837f-26b33ed68cfd-catalog-content\") pod \"community-operators-w7s76\" (UID: \"eedebded-dcce-4646-837f-26b33ed68cfd\") " pod="openshift-marketplace/community-operators-w7s76" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.525747 4967 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.540131 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h72rn\" (UniqueName: \"kubernetes.io/projected/eedebded-dcce-4646-837f-26b33ed68cfd-kube-api-access-h72rn\") pod \"community-operators-w7s76\" (UID: \"eedebded-dcce-4646-837f-26b33ed68cfd\") " pod="openshift-marketplace/community-operators-w7s76" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.618800 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.619033 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aba8d061-5e25-4f5b-84ae-03d940117fe4-catalog-content\") pod \"certified-operators-m5b62\" (UID: \"aba8d061-5e25-4f5b-84ae-03d940117fe4\") " pod="openshift-marketplace/certified-operators-m5b62" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.619069 4967 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aba8d061-5e25-4f5b-84ae-03d940117fe4-utilities\") pod \"certified-operators-m5b62\" (UID: \"aba8d061-5e25-4f5b-84ae-03d940117fe4\") " pod="openshift-marketplace/certified-operators-m5b62" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.619143 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gt9c\" (UniqueName: \"kubernetes.io/projected/aba8d061-5e25-4f5b-84ae-03d940117fe4-kube-api-access-9gt9c\") pod \"certified-operators-m5b62\" (UID: \"aba8d061-5e25-4f5b-84ae-03d940117fe4\") " pod="openshift-marketplace/certified-operators-m5b62" Nov 21 15:37:41 crc kubenswrapper[4967]: E1121 15:37:41.619572 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:42.119556519 +0000 UTC m=+150.378077527 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.619928 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aba8d061-5e25-4f5b-84ae-03d940117fe4-catalog-content\") pod \"certified-operators-m5b62\" (UID: \"aba8d061-5e25-4f5b-84ae-03d940117fe4\") " pod="openshift-marketplace/certified-operators-m5b62" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.620905 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aba8d061-5e25-4f5b-84ae-03d940117fe4-utilities\") pod \"certified-operators-m5b62\" (UID: \"aba8d061-5e25-4f5b-84ae-03d940117fe4\") " pod="openshift-marketplace/certified-operators-m5b62" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.626004 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-dfgb9"] Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.629168 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dfgb9" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.635115 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dfgb9"] Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.648679 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gt9c\" (UniqueName: \"kubernetes.io/projected/aba8d061-5e25-4f5b-84ae-03d940117fe4-kube-api-access-9gt9c\") pod \"certified-operators-m5b62\" (UID: \"aba8d061-5e25-4f5b-84ae-03d940117fe4\") " pod="openshift-marketplace/certified-operators-m5b62" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.720702 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92e1234d-95c3-4198-9144-3d993ce2c551-utilities\") pod \"community-operators-dfgb9\" (UID: \"92e1234d-95c3-4198-9144-3d993ce2c551\") " pod="openshift-marketplace/community-operators-dfgb9" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.721159 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbch9\" (UniqueName: \"kubernetes.io/projected/92e1234d-95c3-4198-9144-3d993ce2c551-kube-api-access-bbch9\") pod \"community-operators-dfgb9\" (UID: \"92e1234d-95c3-4198-9144-3d993ce2c551\") " pod="openshift-marketplace/community-operators-dfgb9" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.721188 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.721212 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92e1234d-95c3-4198-9144-3d993ce2c551-catalog-content\") pod \"community-operators-dfgb9\" (UID: \"92e1234d-95c3-4198-9144-3d993ce2c551\") " pod="openshift-marketplace/community-operators-dfgb9" Nov 21 15:37:41 crc kubenswrapper[4967]: E1121 15:37:41.721630 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 15:37:42.221617422 +0000 UTC m=+150.480138420 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kls8w" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.782352 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7kkh6"] Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.790798 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-m5b62" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.801823 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"6a41b560c0c57567173be04fb13a5ffa4e72e7aa06215488666754c4afeb8604"} Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.810561 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" event={"ID":"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2","Type":"ContainerStarted","Data":"262f2902603758abfccc8646b1858bf2d9d3ca7431acd78daa7d88758bca3163"} Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.810629 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" event={"ID":"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2","Type":"ContainerStarted","Data":"9cfd4d7fc3163bb2404e6e75b8f4625c5eefec3da72898a05f4dd6683d8df29b"} Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.810647 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" event={"ID":"7d8ef3bf-0cff-4bc1-a64d-85aa41a942c2","Type":"ContainerStarted","Data":"5bf0dc8fbfec02dc6bbeb3864deb4a675bb4fff9b51b7e975b12d4a0944b1f55"} Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.812584 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"38dd6f4971076bf3ef3d5e16be72a8dc1059290428a85038b5abecc1064c59e3"} Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.813391 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.817965 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"01aa59e9be98c705dcf4f60633397a2143f7c61a69237fc8e2be08ca43527b7a"} Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.818005 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"8ee29877448e97715005f5d877f4e0276161341f48b4109fe19bf0a6608ba79a"} Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.822266 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.822574 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92e1234d-95c3-4198-9144-3d993ce2c551-catalog-content\") pod \"community-operators-dfgb9\" (UID: \"92e1234d-95c3-4198-9144-3d993ce2c551\") " pod="openshift-marketplace/community-operators-dfgb9" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.822713 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92e1234d-95c3-4198-9144-3d993ce2c551-utilities\") pod \"community-operators-dfgb9\" (UID: \"92e1234d-95c3-4198-9144-3d993ce2c551\") " pod="openshift-marketplace/community-operators-dfgb9" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.822770 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbch9\" (UniqueName: \"kubernetes.io/projected/92e1234d-95c3-4198-9144-3d993ce2c551-kube-api-access-bbch9\") pod \"community-operators-dfgb9\" (UID: \"92e1234d-95c3-4198-9144-3d993ce2c551\") " pod="openshift-marketplace/community-operators-dfgb9" Nov 21 15:37:41 crc kubenswrapper[4967]: E1121 15:37:41.823220 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 15:37:42.32319189 +0000 UTC m=+150.581712898 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.823453 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92e1234d-95c3-4198-9144-3d993ce2c551-catalog-content\") pod \"community-operators-dfgb9\" (UID: \"92e1234d-95c3-4198-9144-3d993ce2c551\") " pod="openshift-marketplace/community-operators-dfgb9" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.823648 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92e1234d-95c3-4198-9144-3d993ce2c551-utilities\") pod \"community-operators-dfgb9\" (UID: \"92e1234d-95c3-4198-9144-3d993ce2c551\") " pod="openshift-marketplace/community-operators-dfgb9" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.835606 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-w7s76" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.854926 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbch9\" (UniqueName: \"kubernetes.io/projected/92e1234d-95c3-4198-9144-3d993ce2c551-kube-api-access-bbch9\") pod \"community-operators-dfgb9\" (UID: \"92e1234d-95c3-4198-9144-3d993ce2c551\") " pod="openshift-marketplace/community-operators-dfgb9" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.880834 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-x2cf9" podStartSLOduration=11.880812329 podStartE2EDuration="11.880812329s" podCreationTimestamp="2025-11-21 15:37:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:41.854038539 +0000 UTC m=+150.112559547" watchObservedRunningTime="2025-11-21 15:37:41.880812329 +0000 UTC m=+150.139333337" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.899781 4967 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-21T15:37:41.525785718Z","Handler":null,"Name":""} Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.910820 4967 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.910977 4967 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.923726 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.925480 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.925724 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.928494 4967 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.928538 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.938706 4967 patch_prober.go:28] interesting pod/console-f9d7485db-m45jq container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.7:8443/health\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.938899 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-m45jq" podUID="daf11197-7c7a-4a0e-8c7d-de8047b53fe7" containerName="console" probeResult="failure" output="Get \"https://10.217.0.7:8443/health\": dial tcp 10.217.0.7:8443: connect: connection refused" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.969079 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dfgb9" Nov 21 15:37:41 crc kubenswrapper[4967]: I1121 15:37:41.992377 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kls8w\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.027154 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.048729 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.119206 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.179661 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl" Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.331488 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d72fe727-d902-4315-afb6-8a67d9df8c57-secret-volume\") pod \"d72fe727-d902-4315-afb6-8a67d9df8c57\" (UID: \"d72fe727-d902-4315-afb6-8a67d9df8c57\") " Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.331967 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d72fe727-d902-4315-afb6-8a67d9df8c57-config-volume\") pod \"d72fe727-d902-4315-afb6-8a67d9df8c57\" (UID: \"d72fe727-d902-4315-afb6-8a67d9df8c57\") " Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.332218 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6slv\" (UniqueName: \"kubernetes.io/projected/d72fe727-d902-4315-afb6-8a67d9df8c57-kube-api-access-v6slv\") pod \"d72fe727-d902-4315-afb6-8a67d9df8c57\" (UID: \"d72fe727-d902-4315-afb6-8a67d9df8c57\") " Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.333124 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d72fe727-d902-4315-afb6-8a67d9df8c57-config-volume" (OuterVolumeSpecName: "config-volume") pod "d72fe727-d902-4315-afb6-8a67d9df8c57" (UID: "d72fe727-d902-4315-afb6-8a67d9df8c57"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.342221 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-w7s76"] Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.342942 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d72fe727-d902-4315-afb6-8a67d9df8c57-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d72fe727-d902-4315-afb6-8a67d9df8c57" (UID: "d72fe727-d902-4315-afb6-8a67d9df8c57"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.343156 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d72fe727-d902-4315-afb6-8a67d9df8c57-kube-api-access-v6slv" (OuterVolumeSpecName: "kube-api-access-v6slv") pod "d72fe727-d902-4315-afb6-8a67d9df8c57" (UID: "d72fe727-d902-4315-afb6-8a67d9df8c57"). InnerVolumeSpecName "kube-api-access-v6slv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:37:42 crc kubenswrapper[4967]: W1121 15:37:42.352693 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeedebded_dcce_4646_837f_26b33ed68cfd.slice/crio-c9ee5d47253f201edd7899cb6cadbd9e4f7a4c883916818bbc124b78e4284bfb WatchSource:0}: Error finding container c9ee5d47253f201edd7899cb6cadbd9e4f7a4c883916818bbc124b78e4284bfb: Status 404 returned error can't find the container with id c9ee5d47253f201edd7899cb6cadbd9e4f7a4c883916818bbc124b78e4284bfb Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.352898 4967 patch_prober.go:28] interesting pod/router-default-5444994796-gwx9x container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 15:37:42 crc kubenswrapper[4967]: [-]has-synced failed: reason withheld Nov 21 15:37:42 crc kubenswrapper[4967]: [+]process-running ok Nov 21 15:37:42 crc kubenswrapper[4967]: healthz check failed Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.352959 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gwx9x" podUID="d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.357345 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-m5b62"] Nov 21 15:37:42 crc kubenswrapper[4967]: W1121 15:37:42.395716 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaba8d061_5e25_4f5b_84ae_03d940117fe4.slice/crio-5a3db8d50000d88bae6107a82a605adf9a39f3f4e2645038230458fff1222d1f WatchSource:0}: Error finding container 5a3db8d50000d88bae6107a82a605adf9a39f3f4e2645038230458fff1222d1f: Status 404 returned error can't find the container with id 5a3db8d50000d88bae6107a82a605adf9a39f3f4e2645038230458fff1222d1f Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.405299 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kls8w"] Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.435443 4967 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d72fe727-d902-4315-afb6-8a67d9df8c57-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.435473 4967 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d72fe727-d902-4315-afb6-8a67d9df8c57-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.435485 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6slv\" (UniqueName: \"kubernetes.io/projected/d72fe727-d902-4315-afb6-8a67d9df8c57-kube-api-access-v6slv\") on node \"crc\" DevicePath \"\"" Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.479540 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dfgb9"] Nov 21 15:37:42 crc kubenswrapper[4967]: W1121 15:37:42.489012 4967 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod92e1234d_95c3_4198_9144_3d993ce2c551.slice/crio-5002d11c262e98bc62c20c798a315350090c79841aa954a90790e0cb678e3088 WatchSource:0}: Error finding container 5002d11c262e98bc62c20c798a315350090c79841aa954a90790e0cb678e3088: Status 404 returned error can't find the container with id 5002d11c262e98bc62c20c798a315350090c79841aa954a90790e0cb678e3088
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.493755 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.499343 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-nkdl7"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.545545 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.594033 4967 patch_prober.go:28] interesting pod/downloads-7954f5f757-tdgx7 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body=
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.594098 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-tdgx7" podUID="4c12eeb4-d087-4e18-a9b0-0a2211a6128d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.594432 4967 patch_prober.go:28] interesting pod/downloads-7954f5f757-tdgx7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body=
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.594498 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-tdgx7" podUID="4c12eeb4-d087-4e18-a9b0-0a2211a6128d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.597369 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-x7hlr"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.631097 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.711639 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 21 15:37:42 crc kubenswrapper[4967]: E1121 15:37:42.711927 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d72fe727-d902-4315-afb6-8a67d9df8c57" containerName="collect-profiles"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.711940 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="d72fe727-d902-4315-afb6-8a67d9df8c57" containerName="collect-profiles"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.712051 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="d72fe727-d902-4315-afb6-8a67d9df8c57" containerName="collect-profiles"
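
[annotation] The cpu_manager, state_mem, and memory_manager lines above show the resource managers dropping per-container assignments left behind by the deleted collect-profiles pod before the newly added revision-pruner-9-crc is admitted. A toy model of that cleanup (a plain string stands in for the cpuset/memory state the real managers checkpoint; names are illustrative):

package main

import "fmt"

// key identifies one container's resource assignment, as in the cpu and
// memory manager state files.
type key struct{ podUID, container string }

// removeStaleState drops assignments whose pod is no longer active; this is
// the cleanup the "RemoveStaleState" lines above record.
func removeStaleState(assignments map[key]string, activePods map[string]bool) {
	for k := range assignments {
		if !activePods[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", k.podUID, k.container)
			delete(assignments, k)
		}
	}
}

func main() {
	assignments := map[key]string{
		{podUID: "d72fe727-d902-4315-afb6-8a67d9df8c57", container: "collect-profiles"}: "cpuset 0-1",
	}
	// The collect-profiles pod was deleted, so only the new pod is active.
	removeStaleState(assignments, map[string]bool{"99ae900c-764b-4b98-9fce-c259ef6fb004": true})
}
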
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.712512 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.718003 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.718625 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.729530 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.746100 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/99ae900c-764b-4b98-9fce-c259ef6fb004-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"99ae900c-764b-4b98-9fce-c259ef6fb004\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.746150 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/99ae900c-764b-4b98-9fce-c259ef6fb004-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"99ae900c-764b-4b98-9fce-c259ef6fb004\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.752729 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-km26f"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.754067 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-km26f"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.761295 4967 patch_prober.go:28] interesting pod/apiserver-76f77b778f-km26f container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Nov 21 15:37:42 crc kubenswrapper[4967]: [+]log ok
Nov 21 15:37:42 crc kubenswrapper[4967]: [+]etcd ok
Nov 21 15:37:42 crc kubenswrapper[4967]: [+]poststarthook/start-apiserver-admission-initializer ok
Nov 21 15:37:42 crc kubenswrapper[4967]: [+]poststarthook/generic-apiserver-start-informers ok
Nov 21 15:37:42 crc kubenswrapper[4967]: [+]poststarthook/max-in-flight-filter ok
Nov 21 15:37:42 crc kubenswrapper[4967]: [+]poststarthook/storage-object-count-tracker-hook ok
Nov 21 15:37:42 crc kubenswrapper[4967]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Nov 21 15:37:42 crc kubenswrapper[4967]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Nov 21 15:37:42 crc kubenswrapper[4967]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok
Nov 21 15:37:42 crc kubenswrapper[4967]: [+]poststarthook/project.openshift.io-projectcache ok
Nov 21 15:37:42 crc kubenswrapper[4967]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Nov 21 15:37:42 crc kubenswrapper[4967]: [+]poststarthook/openshift.io-startinformers ok
Nov 21 15:37:42 crc kubenswrapper[4967]: [+]poststarthook/openshift.io-restmapperupdater ok
Nov 21 15:37:42 crc kubenswrapper[4967]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Nov 21 15:37:42 crc kubenswrapper[4967]: livez check failed
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.761381 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-km26f" podUID="28189f52-065e-4d6f-b959-6a052477f10f" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.859008 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/99ae900c-764b-4b98-9fce-c259ef6fb004-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"99ae900c-764b-4b98-9fce-c259ef6fb004\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.859076 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/99ae900c-764b-4b98-9fce-c259ef6fb004-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"99ae900c-764b-4b98-9fce-c259ef6fb004\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.859209 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/99ae900c-764b-4b98-9fce-c259ef6fb004-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"99ae900c-764b-4b98-9fce-c259ef6fb004\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.859996 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" event={"ID":"274aead1-3e11-4349-99be-32e19bfe7d78","Type":"ContainerStarted","Data":"8f01ce99440ab93c52792e64906cd0c0910c3c8132d6c82e5f5b0715b867ba90"}
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.860038 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" event={"ID":"274aead1-3e11-4349-99be-32e19bfe7d78","Type":"ContainerStarted","Data":"d523f2e7b6d76f71bb15a8dd66084e5eb146a30047f1cb2f1d24ec6acd22b878"}
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.860928 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-kls8w"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.875861 4967 generic.go:334] "Generic (PLEG): container finished" podID="92e1234d-95c3-4198-9144-3d993ce2c551" containerID="58b053a618d5fa706c414caace41257bae7dd7ec04872a7dc67087bab12ab376" exitCode=0
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.875969 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfgb9" event={"ID":"92e1234d-95c3-4198-9144-3d993ce2c551","Type":"ContainerDied","Data":"58b053a618d5fa706c414caace41257bae7dd7ec04872a7dc67087bab12ab376"}
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.876016 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfgb9" event={"ID":"92e1234d-95c3-4198-9144-3d993ce2c551","Type":"ContainerStarted","Data":"5002d11c262e98bc62c20c798a315350090c79841aa954a90790e0cb678e3088"}
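
[annotation] The "Generic (PLEG): container finished" / "SyncLoop (PLEG): event for pod" pairs above come from the pod lifecycle event generator: it periodically relists containers from the runtime and diffs the new snapshot against the previous one, turning a running-to-exited transition into ContainerDied (with the exit code) and a newly running container into ContainerStarted. A compact sketch of that diff (container IDs shortened; the real PLEG also tracks sandboxes and cache state):

package main

import "fmt"

type containerState struct {
	running  bool
	exitCode int
}

// relist diffs two snapshots of a pod's containers, keyed by container ID,
// and prints the lifecycle events the kubelet sync loop would receive.
func relist(prev, curr map[string]containerState) {
	for id, c := range curr {
		p, seen := prev[id]
		switch {
		case !seen && c.running:
			fmt.Printf("SyncLoop (PLEG): ContainerStarted %s\n", id)
		case seen && p.running && !c.running:
			fmt.Printf("container finished %s exitCode=%d -> ContainerDied\n", id, c.exitCode)
		}
	}
}

func main() {
	// Mirrors the community-operators-dfgb9 events above: the extract
	// container exits with code 0 while the pod sandbox container starts.
	prev := map[string]containerState{"58b053a6": {running: true}}
	curr := map[string]containerState{
		"58b053a6": {running: false, exitCode: 0},
		"5002d11c": {running: true},
	}
	relist(prev, curr)
}
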
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.879400 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2bjhd"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.879739 4967 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.882547 4967 generic.go:334] "Generic (PLEG): container finished" podID="dc581da3-1d2d-4d88-a2a8-6729abd4b955" containerID="94490ca1ff936d40d37e4dfbf9fcdb4d254dce23fc2a2c1597c684cdbba902dd" exitCode=0
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.882627 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7kkh6" event={"ID":"dc581da3-1d2d-4d88-a2a8-6729abd4b955","Type":"ContainerDied","Data":"94490ca1ff936d40d37e4dfbf9fcdb4d254dce23fc2a2c1597c684cdbba902dd"}
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.882663 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7kkh6" event={"ID":"dc581da3-1d2d-4d88-a2a8-6729abd4b955","Type":"ContainerStarted","Data":"7cd4b299605e6dbe28f1a7dbd2f544146d3b595cba0152aae99f48e9fad1c9ed"}
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.886209 4967 generic.go:334] "Generic (PLEG): container finished" podID="aba8d061-5e25-4f5b-84ae-03d940117fe4" containerID="094b233bd87376378c8543f28bd94301e92493ca508df396e7feb8d9ba83ab22" exitCode=0
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.886295 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m5b62" event={"ID":"aba8d061-5e25-4f5b-84ae-03d940117fe4","Type":"ContainerDied","Data":"094b233bd87376378c8543f28bd94301e92493ca508df396e7feb8d9ba83ab22"}
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.886341 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m5b62" event={"ID":"aba8d061-5e25-4f5b-84ae-03d940117fe4","Type":"ContainerStarted","Data":"5a3db8d50000d88bae6107a82a605adf9a39f3f4e2645038230458fff1222d1f"}
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.900170 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl" event={"ID":"d72fe727-d902-4315-afb6-8a67d9df8c57","Type":"ContainerDied","Data":"8b189f9113dd2ab0c762a16c97bcd13e6d9388fea6168fa1d513072d6b07167b"}
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.900224 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8b189f9113dd2ab0c762a16c97bcd13e6d9388fea6168fa1d513072d6b07167b"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.900228 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.902407 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" podStartSLOduration=126.902380864 podStartE2EDuration="2m6.902380864s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:42.899916063 +0000 UTC m=+151.158437071" watchObservedRunningTime="2025-11-21 15:37:42.902380864 +0000 UTC m=+151.160901882"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.919271 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.920329 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/99ae900c-764b-4b98-9fce-c259ef6fb004-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"99ae900c-764b-4b98-9fce-c259ef6fb004\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.923714 4967 generic.go:334] "Generic (PLEG): container finished" podID="eedebded-dcce-4646-837f-26b33ed68cfd" containerID="24e14ca992b7668b2dc17d1cf67d0f58edde4f9432133e4478b9d1ea6850afe2" exitCode=0
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.925005 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w7s76" event={"ID":"eedebded-dcce-4646-837f-26b33ed68cfd","Type":"ContainerDied","Data":"24e14ca992b7668b2dc17d1cf67d0f58edde4f9432133e4478b9d1ea6850afe2"}
Nov 21 15:37:42 crc kubenswrapper[4967]: I1121 15:37:42.925043 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w7s76" event={"ID":"eedebded-dcce-4646-837f-26b33ed68cfd","Type":"ContainerStarted","Data":"c9ee5d47253f201edd7899cb6cadbd9e4f7a4c883916818bbc124b78e4284bfb"}
Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.029660 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.032009 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-48b7h"]
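
[annotation] The pod_startup_latency_tracker record above reports two figures for image-registry-697d97f7c8-kls8w: an end-to-end duration since the pod's creation timestamp and an SLO duration that, as I read the fields, excludes the image-pull window. Here the pull timestamps are the zero value (no pull was needed), so the two coincide at ~2m6.9s. A small sketch of that arithmetic under those assumptions (not the kubelet's exact accounting):

package main

import (
	"fmt"
	"time"
)

// startupDurations: E2E is observedRunning - created; the SLO figure is
// assumed to additionally subtract the image-pull window. Zero pull
// timestamps leave the two equal, as in the log line above.
func startupDurations(created, firstPull, lastPull, observedRunning time.Time) (slo, e2e time.Duration) {
	e2e = observedRunning.Sub(created)
	slo = e2e
	if !firstPull.IsZero() && !lastPull.IsZero() {
		slo -= lastPull.Sub(firstPull)
	}
	return slo, e2e
}

func main() {
	created := time.Date(2025, 11, 21, 15, 35, 36, 0, time.UTC)
	running := time.Date(2025, 11, 21, 15, 37, 42, 899916063, time.UTC)
	slo, e2e := startupDurations(created, time.Time{}, time.Time{}, running)
	fmt.Printf("podStartSLOduration=%v podStartE2EDuration=%v\n", slo, e2e)
}
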
Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.033521 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-48b7h"
Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.040775 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.152412 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-48b7h"]
Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.165225 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b39663f2-10f5-47c8-817d-7667d49539a0-utilities\") pod \"redhat-marketplace-48b7h\" (UID: \"b39663f2-10f5-47c8-817d-7667d49539a0\") " pod="openshift-marketplace/redhat-marketplace-48b7h"
Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.165268 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b39663f2-10f5-47c8-817d-7667d49539a0-catalog-content\") pod \"redhat-marketplace-48b7h\" (UID: \"b39663f2-10f5-47c8-817d-7667d49539a0\") " pod="openshift-marketplace/redhat-marketplace-48b7h"
Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.165378 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rljg\" (UniqueName: \"kubernetes.io/projected/b39663f2-10f5-47c8-817d-7667d49539a0-kube-api-access-8rljg\") pod \"redhat-marketplace-48b7h\" (UID: \"b39663f2-10f5-47c8-817d-7667d49539a0\") " pod="openshift-marketplace/redhat-marketplace-48b7h"
Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.266438 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b39663f2-10f5-47c8-817d-7667d49539a0-catalog-content\") pod \"redhat-marketplace-48b7h\" (UID: \"b39663f2-10f5-47c8-817d-7667d49539a0\") " pod="openshift-marketplace/redhat-marketplace-48b7h"
Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.266549 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rljg\" (UniqueName: \"kubernetes.io/projected/b39663f2-10f5-47c8-817d-7667d49539a0-kube-api-access-8rljg\") pod \"redhat-marketplace-48b7h\" (UID: \"b39663f2-10f5-47c8-817d-7667d49539a0\") " pod="openshift-marketplace/redhat-marketplace-48b7h"
Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.266613 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b39663f2-10f5-47c8-817d-7667d49539a0-utilities\") pod \"redhat-marketplace-48b7h\" (UID: \"b39663f2-10f5-47c8-817d-7667d49539a0\") " pod="openshift-marketplace/redhat-marketplace-48b7h"
Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.266993 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b39663f2-10f5-47c8-817d-7667d49539a0-catalog-content\") pod \"redhat-marketplace-48b7h\" (UID: \"b39663f2-10f5-47c8-817d-7667d49539a0\") " pod="openshift-marketplace/redhat-marketplace-48b7h"
Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.267379 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b39663f2-10f5-47c8-817d-7667d49539a0-utilities\") pod \"redhat-marketplace-48b7h\" (UID:
\"b39663f2-10f5-47c8-817d-7667d49539a0\") " pod="openshift-marketplace/redhat-marketplace-48b7h" Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.313586 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rljg\" (UniqueName: \"kubernetes.io/projected/b39663f2-10f5-47c8-817d-7667d49539a0-kube-api-access-8rljg\") pod \"redhat-marketplace-48b7h\" (UID: \"b39663f2-10f5-47c8-817d-7667d49539a0\") " pod="openshift-marketplace/redhat-marketplace-48b7h" Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.343328 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-gwx9x" Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.353494 4967 patch_prober.go:28] interesting pod/router-default-5444994796-gwx9x container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 15:37:43 crc kubenswrapper[4967]: [-]has-synced failed: reason withheld Nov 21 15:37:43 crc kubenswrapper[4967]: [+]process-running ok Nov 21 15:37:43 crc kubenswrapper[4967]: healthz check failed Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.353554 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gwx9x" podUID="d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.420302 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-scgn5"] Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.421437 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-scgn5" Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.443186 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-scgn5"] Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.462633 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-48b7h" Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.473293 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.572882 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e056f496-158f-4033-8d35-a93e65357dbb-catalog-content\") pod \"redhat-marketplace-scgn5\" (UID: \"e056f496-158f-4033-8d35-a93e65357dbb\") " pod="openshift-marketplace/redhat-marketplace-scgn5" Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.572952 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e056f496-158f-4033-8d35-a93e65357dbb-utilities\") pod \"redhat-marketplace-scgn5\" (UID: \"e056f496-158f-4033-8d35-a93e65357dbb\") " pod="openshift-marketplace/redhat-marketplace-scgn5" Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.573035 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5fxj\" (UniqueName: \"kubernetes.io/projected/e056f496-158f-4033-8d35-a93e65357dbb-kube-api-access-h5fxj\") pod \"redhat-marketplace-scgn5\" (UID: \"e056f496-158f-4033-8d35-a93e65357dbb\") " pod="openshift-marketplace/redhat-marketplace-scgn5" Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.677877 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5fxj\" (UniqueName: \"kubernetes.io/projected/e056f496-158f-4033-8d35-a93e65357dbb-kube-api-access-h5fxj\") pod \"redhat-marketplace-scgn5\" (UID: \"e056f496-158f-4033-8d35-a93e65357dbb\") " pod="openshift-marketplace/redhat-marketplace-scgn5" Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.678381 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e056f496-158f-4033-8d35-a93e65357dbb-catalog-content\") pod \"redhat-marketplace-scgn5\" (UID: \"e056f496-158f-4033-8d35-a93e65357dbb\") " pod="openshift-marketplace/redhat-marketplace-scgn5" Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.678422 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e056f496-158f-4033-8d35-a93e65357dbb-utilities\") pod \"redhat-marketplace-scgn5\" (UID: \"e056f496-158f-4033-8d35-a93e65357dbb\") " pod="openshift-marketplace/redhat-marketplace-scgn5" Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.678893 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e056f496-158f-4033-8d35-a93e65357dbb-utilities\") pod \"redhat-marketplace-scgn5\" (UID: \"e056f496-158f-4033-8d35-a93e65357dbb\") " pod="openshift-marketplace/redhat-marketplace-scgn5" Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.678967 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e056f496-158f-4033-8d35-a93e65357dbb-catalog-content\") pod \"redhat-marketplace-scgn5\" (UID: \"e056f496-158f-4033-8d35-a93e65357dbb\") " pod="openshift-marketplace/redhat-marketplace-scgn5" Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.745277 4967 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-h5fxj\" (UniqueName: \"kubernetes.io/projected/e056f496-158f-4033-8d35-a93e65357dbb-kube-api-access-h5fxj\") pod \"redhat-marketplace-scgn5\" (UID: \"e056f496-158f-4033-8d35-a93e65357dbb\") " pod="openshift-marketplace/redhat-marketplace-scgn5" Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.760209 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-scgn5" Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.819980 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-48b7h"] Nov 21 15:37:43 crc kubenswrapper[4967]: W1121 15:37:43.844428 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb39663f2_10f5_47c8_817d_7667d49539a0.slice/crio-0811fef926f86c2d0318f9e5b9db5be3a9b9e1430c32b09017ec398db96a61a6 WatchSource:0}: Error finding container 0811fef926f86c2d0318f9e5b9db5be3a9b9e1430c32b09017ec398db96a61a6: Status 404 returned error can't find the container with id 0811fef926f86c2d0318f9e5b9db5be3a9b9e1430c32b09017ec398db96a61a6 Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.948030 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-48b7h" event={"ID":"b39663f2-10f5-47c8-817d-7667d49539a0","Type":"ContainerStarted","Data":"0811fef926f86c2d0318f9e5b9db5be3a9b9e1430c32b09017ec398db96a61a6"} Nov 21 15:37:43 crc kubenswrapper[4967]: I1121 15:37:43.965324 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"99ae900c-764b-4b98-9fce-c259ef6fb004","Type":"ContainerStarted","Data":"2aa5621d0b660dc87463cc5991e57ecfee0bd557b91f185d2c28dbf47c87de05"} Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.020016 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-scgn5"] Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.213239 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rr6kq"] Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.215986 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rr6kq" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.220845 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.226064 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rr6kq"] Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.288724 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8caeadee-cb78-47a9-b93f-e4a8e270a952-utilities\") pod \"redhat-operators-rr6kq\" (UID: \"8caeadee-cb78-47a9-b93f-e4a8e270a952\") " pod="openshift-marketplace/redhat-operators-rr6kq" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.288782 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8caeadee-cb78-47a9-b93f-e4a8e270a952-catalog-content\") pod \"redhat-operators-rr6kq\" (UID: \"8caeadee-cb78-47a9-b93f-e4a8e270a952\") " pod="openshift-marketplace/redhat-operators-rr6kq" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.288805 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lv72s\" (UniqueName: \"kubernetes.io/projected/8caeadee-cb78-47a9-b93f-e4a8e270a952-kube-api-access-lv72s\") pod \"redhat-operators-rr6kq\" (UID: \"8caeadee-cb78-47a9-b93f-e4a8e270a952\") " pod="openshift-marketplace/redhat-operators-rr6kq" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.346478 4967 patch_prober.go:28] interesting pod/router-default-5444994796-gwx9x container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 15:37:44 crc kubenswrapper[4967]: [-]has-synced failed: reason withheld Nov 21 15:37:44 crc kubenswrapper[4967]: [+]process-running ok Nov 21 15:37:44 crc kubenswrapper[4967]: healthz check failed Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.346561 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gwx9x" podUID="d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.390144 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lv72s\" (UniqueName: \"kubernetes.io/projected/8caeadee-cb78-47a9-b93f-e4a8e270a952-kube-api-access-lv72s\") pod \"redhat-operators-rr6kq\" (UID: \"8caeadee-cb78-47a9-b93f-e4a8e270a952\") " pod="openshift-marketplace/redhat-operators-rr6kq" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.390291 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8caeadee-cb78-47a9-b93f-e4a8e270a952-utilities\") pod \"redhat-operators-rr6kq\" (UID: \"8caeadee-cb78-47a9-b93f-e4a8e270a952\") " pod="openshift-marketplace/redhat-operators-rr6kq" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.390330 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8caeadee-cb78-47a9-b93f-e4a8e270a952-catalog-content\") pod 
\"redhat-operators-rr6kq\" (UID: \"8caeadee-cb78-47a9-b93f-e4a8e270a952\") " pod="openshift-marketplace/redhat-operators-rr6kq" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.391731 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8caeadee-cb78-47a9-b93f-e4a8e270a952-utilities\") pod \"redhat-operators-rr6kq\" (UID: \"8caeadee-cb78-47a9-b93f-e4a8e270a952\") " pod="openshift-marketplace/redhat-operators-rr6kq" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.391809 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8caeadee-cb78-47a9-b93f-e4a8e270a952-catalog-content\") pod \"redhat-operators-rr6kq\" (UID: \"8caeadee-cb78-47a9-b93f-e4a8e270a952\") " pod="openshift-marketplace/redhat-operators-rr6kq" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.427108 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lv72s\" (UniqueName: \"kubernetes.io/projected/8caeadee-cb78-47a9-b93f-e4a8e270a952-kube-api-access-lv72s\") pod \"redhat-operators-rr6kq\" (UID: \"8caeadee-cb78-47a9-b93f-e4a8e270a952\") " pod="openshift-marketplace/redhat-operators-rr6kq" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.594263 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rr6kq" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.609482 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dmzs6"] Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.610815 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dmzs6" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.623707 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dmzs6"] Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.693651 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ceb149c-9ccd-44c5-b44d-6f63435abda7-utilities\") pod \"redhat-operators-dmzs6\" (UID: \"0ceb149c-9ccd-44c5-b44d-6f63435abda7\") " pod="openshift-marketplace/redhat-operators-dmzs6" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.693748 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2rml\" (UniqueName: \"kubernetes.io/projected/0ceb149c-9ccd-44c5-b44d-6f63435abda7-kube-api-access-f2rml\") pod \"redhat-operators-dmzs6\" (UID: \"0ceb149c-9ccd-44c5-b44d-6f63435abda7\") " pod="openshift-marketplace/redhat-operators-dmzs6" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.693865 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ceb149c-9ccd-44c5-b44d-6f63435abda7-catalog-content\") pod \"redhat-operators-dmzs6\" (UID: \"0ceb149c-9ccd-44c5-b44d-6f63435abda7\") " pod="openshift-marketplace/redhat-operators-dmzs6" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.795324 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ceb149c-9ccd-44c5-b44d-6f63435abda7-catalog-content\") pod \"redhat-operators-dmzs6\" (UID: 
\"0ceb149c-9ccd-44c5-b44d-6f63435abda7\") " pod="openshift-marketplace/redhat-operators-dmzs6" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.795925 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ceb149c-9ccd-44c5-b44d-6f63435abda7-utilities\") pod \"redhat-operators-dmzs6\" (UID: \"0ceb149c-9ccd-44c5-b44d-6f63435abda7\") " pod="openshift-marketplace/redhat-operators-dmzs6" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.795962 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2rml\" (UniqueName: \"kubernetes.io/projected/0ceb149c-9ccd-44c5-b44d-6f63435abda7-kube-api-access-f2rml\") pod \"redhat-operators-dmzs6\" (UID: \"0ceb149c-9ccd-44c5-b44d-6f63435abda7\") " pod="openshift-marketplace/redhat-operators-dmzs6" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.796439 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ceb149c-9ccd-44c5-b44d-6f63435abda7-catalog-content\") pod \"redhat-operators-dmzs6\" (UID: \"0ceb149c-9ccd-44c5-b44d-6f63435abda7\") " pod="openshift-marketplace/redhat-operators-dmzs6" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.796459 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ceb149c-9ccd-44c5-b44d-6f63435abda7-utilities\") pod \"redhat-operators-dmzs6\" (UID: \"0ceb149c-9ccd-44c5-b44d-6f63435abda7\") " pod="openshift-marketplace/redhat-operators-dmzs6" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.819433 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2rml\" (UniqueName: \"kubernetes.io/projected/0ceb149c-9ccd-44c5-b44d-6f63435abda7-kube-api-access-f2rml\") pod \"redhat-operators-dmzs6\" (UID: \"0ceb149c-9ccd-44c5-b44d-6f63435abda7\") " pod="openshift-marketplace/redhat-operators-dmzs6" Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.955352 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dmzs6"
Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.987265 4967 generic.go:334] "Generic (PLEG): container finished" podID="e056f496-158f-4033-8d35-a93e65357dbb" containerID="b42be99dd718aa12e04941d91227d2d75054c32ce9b9a15202ed3a103f5de7d5" exitCode=0
Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.987701 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-scgn5" event={"ID":"e056f496-158f-4033-8d35-a93e65357dbb","Type":"ContainerDied","Data":"b42be99dd718aa12e04941d91227d2d75054c32ce9b9a15202ed3a103f5de7d5"}
Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.987798 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-scgn5" event={"ID":"e056f496-158f-4033-8d35-a93e65357dbb","Type":"ContainerStarted","Data":"e68aee7f462f4020994cb08e392527ce9a5534477edcfc1085118a6f2c4b6598"}
Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.992292 4967 generic.go:334] "Generic (PLEG): container finished" podID="b39663f2-10f5-47c8-817d-7667d49539a0" containerID="c441a63906fe2df6e3f9524130e05ce9935b85f240c3176191c370ef496e7cde" exitCode=0
Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.992354 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-48b7h" event={"ID":"b39663f2-10f5-47c8-817d-7667d49539a0","Type":"ContainerDied","Data":"c441a63906fe2df6e3f9524130e05ce9935b85f240c3176191c370ef496e7cde"}
Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.995788 4967 generic.go:334] "Generic (PLEG): container finished" podID="99ae900c-764b-4b98-9fce-c259ef6fb004" containerID="25dc01a396b506b774f22f34e12e0502bbb4703da2fc90cab9b3963f2c8e1853" exitCode=0
Nov 21 15:37:44 crc kubenswrapper[4967]: I1121 15:37:44.996479 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"99ae900c-764b-4b98-9fce-c259ef6fb004","Type":"ContainerDied","Data":"25dc01a396b506b774f22f34e12e0502bbb4703da2fc90cab9b3963f2c8e1853"}
Nov 21 15:37:45 crc kubenswrapper[4967]: I1121 15:37:45.147687 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rr6kq"]
Nov 21 15:37:45 crc kubenswrapper[4967]: W1121 15:37:45.175078 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8caeadee_cb78_47a9_b93f_e4a8e270a952.slice/crio-806f00f7ba86612cdf704acb8983af70e3b5cc900fff2588c81d9c575c08f8a7 WatchSource:0}: Error finding container 806f00f7ba86612cdf704acb8983af70e3b5cc900fff2588c81d9c575c08f8a7: Status 404 returned error can't find the container with id 806f00f7ba86612cdf704acb8983af70e3b5cc900fff2588c81d9c575c08f8a7
Nov 21 15:37:45 crc kubenswrapper[4967]: I1121 15:37:45.355047 4967 patch_prober.go:28] interesting pod/router-default-5444994796-gwx9x container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 21 15:37:45 crc kubenswrapper[4967]: [-]has-synced failed: reason withheld
Nov 21 15:37:45 crc kubenswrapper[4967]: [+]process-running ok
Nov 21 15:37:45 crc kubenswrapper[4967]: healthz check failed
Nov 21 15:37:45 crc kubenswrapper[4967]: I1121 15:37:45.356139 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gwx9x" podUID="d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
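
[annotation] The router startup-probe block just above is an aggregated healthz report: each sub-check contributes a [+] or [-] line, and while any sub-check fails the endpoint returns HTTP 500, which the kubelet prober records as a Startup probe failure. A minimal sketch of a handler with that shape (check names taken from the log; the handler itself is illustrative, not the router's implementation):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// check is one named sub-check of an aggregated health endpoint.
type check struct {
	name string
	run  func() error
}

// healthz renders the [+]/[-] report seen in the probe output above and
// returns 500 while any sub-check still fails.
func healthz(checks []check) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		body, failed := "", false
		for _, c := range checks {
			if err := c.run(); err != nil {
				body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
				failed = true
			} else {
				body += fmt.Sprintf("[+]%s ok\n", c.name)
			}
		}
		if failed {
			w.WriteHeader(http.StatusInternalServerError)
			body += "healthz check failed\n"
		}
		fmt.Fprint(w, body)
	}
}

func main() {
	// backend-http and has-synced still failing, as in the router's report.
	h := healthz([]check{
		{"backend-http", func() error { return fmt.Errorf("not ready") }},
		{"has-synced", func() error { return fmt.Errorf("not ready") }},
		{"process-running", func() error { return nil }},
	})
	rec := httptest.NewRecorder()
	h(rec, httptest.NewRequest(http.MethodGet, "/healthz", nil))
	fmt.Printf("status=%d\n%s", rec.Code, rec.Body.String())
}
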
Nov 21 15:37:45 crc kubenswrapper[4967]: I1121 15:37:45.506701 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dmzs6"]
Nov 21 15:37:45 crc kubenswrapper[4967]: I1121 15:37:45.523789 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 21 15:37:45 crc kubenswrapper[4967]: I1121 15:37:45.528797 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 21 15:37:45 crc kubenswrapper[4967]: I1121 15:37:45.528933 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 21 15:37:45 crc kubenswrapper[4967]: I1121 15:37:45.531070 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 21 15:37:45 crc kubenswrapper[4967]: I1121 15:37:45.533448 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 21 15:37:45 crc kubenswrapper[4967]: I1121 15:37:45.614489 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/efc2444b-afcd-4576-a510-6ebfcbdb7b01-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"efc2444b-afcd-4576-a510-6ebfcbdb7b01\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 21 15:37:45 crc kubenswrapper[4967]: I1121 15:37:45.614549 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/efc2444b-afcd-4576-a510-6ebfcbdb7b01-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"efc2444b-afcd-4576-a510-6ebfcbdb7b01\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 21 15:37:45 crc kubenswrapper[4967]: I1121 15:37:45.716175 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/efc2444b-afcd-4576-a510-6ebfcbdb7b01-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"efc2444b-afcd-4576-a510-6ebfcbdb7b01\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 21 15:37:45 crc kubenswrapper[4967]: I1121 15:37:45.716241 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/efc2444b-afcd-4576-a510-6ebfcbdb7b01-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"efc2444b-afcd-4576-a510-6ebfcbdb7b01\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 21 15:37:45 crc kubenswrapper[4967]: I1121 15:37:45.716776 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/efc2444b-afcd-4576-a510-6ebfcbdb7b01-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"efc2444b-afcd-4576-a510-6ebfcbdb7b01\") "
Nov 21 15:37:45 crc kubenswrapper[4967]: I1121 15:37:45.736708 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/efc2444b-afcd-4576-a510-6ebfcbdb7b01-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"efc2444b-afcd-4576-a510-6ebfcbdb7b01\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 21 15:37:45 crc kubenswrapper[4967]: I1121 15:37:45.877047 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 21 15:37:46 crc kubenswrapper[4967]: I1121 15:37:46.005510 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rr6kq" event={"ID":"8caeadee-cb78-47a9-b93f-e4a8e270a952","Type":"ContainerStarted","Data":"806f00f7ba86612cdf704acb8983af70e3b5cc900fff2588c81d9c575c08f8a7"}
Nov 21 15:37:46 crc kubenswrapper[4967]: I1121 15:37:46.011767 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dmzs6" event={"ID":"0ceb149c-9ccd-44c5-b44d-6f63435abda7","Type":"ContainerStarted","Data":"fc9b15a5d11db00bc9f86b1e0ee4b3077171b8d5b2c75e41c78c3553b9ea6c82"}
Nov 21 15:37:46 crc kubenswrapper[4967]: I1121 15:37:46.186347 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 21 15:37:46 crc kubenswrapper[4967]: I1121 15:37:46.349709 4967 patch_prober.go:28] interesting pod/router-default-5444994796-gwx9x container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 21 15:37:46 crc kubenswrapper[4967]: [-]has-synced failed: reason withheld
Nov 21 15:37:46 crc kubenswrapper[4967]: [+]process-running ok
Nov 21 15:37:46 crc kubenswrapper[4967]: healthz check failed
Nov 21 15:37:46 crc kubenswrapper[4967]: I1121 15:37:46.349771 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gwx9x" podUID="d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 21 15:37:46 crc kubenswrapper[4967]: I1121 15:37:46.377887 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 21 15:37:46 crc kubenswrapper[4967]: I1121 15:37:46.425464 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/99ae900c-764b-4b98-9fce-c259ef6fb004-kubelet-dir\") pod \"99ae900c-764b-4b98-9fce-c259ef6fb004\" (UID: \"99ae900c-764b-4b98-9fce-c259ef6fb004\") "
Nov 21 15:37:46 crc kubenswrapper[4967]: I1121 15:37:46.425572 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/99ae900c-764b-4b98-9fce-c259ef6fb004-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "99ae900c-764b-4b98-9fce-c259ef6fb004" (UID: "99ae900c-764b-4b98-9fce-c259ef6fb004"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
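
[annotation] The UnmountVolume started -> UnmountVolume.TearDown succeeded -> "Volume detached ... DevicePath \"\"" progression here is the volume manager's reconciler converging the actual state of the world toward the desired state after revision-pruner-9-crc terminated: the pod's volumes are no longer desired, so each gets torn down and marked detached. A toy model of one reconciler pass under that reading (the real reconciler dispatches to volume plugins and tracks far more state):

package main

import "fmt"

// mounted is the actual state of the world (volume -> owning pod UID);
// desired lists volumes that should stay mounted. One pass unmounts
// anything mounted but no longer desired.
func reconcile(mounted map[string]string, desired map[string]bool) {
	for vol, podUID := range mounted {
		if desired[vol] {
			continue
		}
		fmt.Printf("operationExecutor.UnmountVolume started for volume %q pod %q\n", vol, podUID)
		// the real kubelet calls the volume plugin's TearDown here
		// (host-path, projected, csi, ...); success is assumed
		delete(mounted, vol)
		fmt.Printf("Volume detached for volume %q on node \"crc\" DevicePath \"\"\n", vol)
	}
}

func main() {
	mounted := map[string]string{
		"kubelet-dir":     "99ae900c-764b-4b98-9fce-c259ef6fb004",
		"kube-api-access": "99ae900c-764b-4b98-9fce-c259ef6fb004",
	}
	reconcile(mounted, map[string]bool{}) // the pod terminated: nothing is desired
}
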
Nov 21 15:37:46 crc kubenswrapper[4967]: I1121 15:37:46.425594 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/99ae900c-764b-4b98-9fce-c259ef6fb004-kube-api-access\") pod \"99ae900c-764b-4b98-9fce-c259ef6fb004\" (UID: \"99ae900c-764b-4b98-9fce-c259ef6fb004\") "
Nov 21 15:37:46 crc kubenswrapper[4967]: I1121 15:37:46.425992 4967 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/99ae900c-764b-4b98-9fce-c259ef6fb004-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 21 15:37:46 crc kubenswrapper[4967]: I1121 15:37:46.431671 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99ae900c-764b-4b98-9fce-c259ef6fb004-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "99ae900c-764b-4b98-9fce-c259ef6fb004" (UID: "99ae900c-764b-4b98-9fce-c259ef6fb004"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:37:46 crc kubenswrapper[4967]: I1121 15:37:46.535202 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 21 15:37:46 crc kubenswrapper[4967]: I1121 15:37:46.535289 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 21 15:37:46 crc kubenswrapper[4967]: I1121 15:37:46.535602 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/99ae900c-764b-4b98-9fce-c259ef6fb004-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 21 15:37:47 crc kubenswrapper[4967]: I1121 15:37:47.025248 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"efc2444b-afcd-4576-a510-6ebfcbdb7b01","Type":"ContainerStarted","Data":"d4a83c0618bc061d6be4ee88b1692a2872f7d7d66585ed0ce3f7e83f3e06e7bd"}
Nov 21 15:37:47 crc kubenswrapper[4967]: I1121 15:37:47.031998 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"99ae900c-764b-4b98-9fce-c259ef6fb004","Type":"ContainerDied","Data":"2aa5621d0b660dc87463cc5991e57ecfee0bd557b91f185d2c28dbf47c87de05"}
Nov 21 15:37:47 crc kubenswrapper[4967]: I1121 15:37:47.032057 4967 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 15:37:47 crc kubenswrapper[4967]: I1121 15:37:47.032065 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2aa5621d0b660dc87463cc5991e57ecfee0bd557b91f185d2c28dbf47c87de05" Nov 21 15:37:47 crc kubenswrapper[4967]: I1121 15:37:47.347706 4967 patch_prober.go:28] interesting pod/router-default-5444994796-gwx9x container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 15:37:47 crc kubenswrapper[4967]: [-]has-synced failed: reason withheld Nov 21 15:37:47 crc kubenswrapper[4967]: [+]process-running ok Nov 21 15:37:47 crc kubenswrapper[4967]: healthz check failed Nov 21 15:37:47 crc kubenswrapper[4967]: I1121 15:37:47.347809 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gwx9x" podUID="d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 15:37:47 crc kubenswrapper[4967]: I1121 15:37:47.757853 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:47 crc kubenswrapper[4967]: I1121 15:37:47.765151 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-km26f" Nov 21 15:37:48 crc kubenswrapper[4967]: I1121 15:37:48.042411 4967 generic.go:334] "Generic (PLEG): container finished" podID="0ceb149c-9ccd-44c5-b44d-6f63435abda7" containerID="8728aa5f7a2dcac45c503ce90b28628e14b5a70f16f39a73b1b0e58d856320c7" exitCode=0 Nov 21 15:37:48 crc kubenswrapper[4967]: I1121 15:37:48.042509 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dmzs6" event={"ID":"0ceb149c-9ccd-44c5-b44d-6f63435abda7","Type":"ContainerDied","Data":"8728aa5f7a2dcac45c503ce90b28628e14b5a70f16f39a73b1b0e58d856320c7"} Nov 21 15:37:48 crc kubenswrapper[4967]: I1121 15:37:48.052116 4967 generic.go:334] "Generic (PLEG): container finished" podID="8caeadee-cb78-47a9-b93f-e4a8e270a952" containerID="c6ff110f39c226806933a0319c484c40abd5d24066e24f6e1ced306cb36ead38" exitCode=0 Nov 21 15:37:48 crc kubenswrapper[4967]: I1121 15:37:48.052210 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rr6kq" event={"ID":"8caeadee-cb78-47a9-b93f-e4a8e270a952","Type":"ContainerDied","Data":"c6ff110f39c226806933a0319c484c40abd5d24066e24f6e1ced306cb36ead38"} Nov 21 15:37:48 crc kubenswrapper[4967]: I1121 15:37:48.056085 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"efc2444b-afcd-4576-a510-6ebfcbdb7b01","Type":"ContainerStarted","Data":"af76e024cf11e439c663c1a6eeaee37d8ab097c00cf91f88cacbe450df0a6746"} Nov 21 15:37:48 crc kubenswrapper[4967]: I1121 15:37:48.091657 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=3.09163183 podStartE2EDuration="3.09163183s" podCreationTimestamp="2025-11-21 15:37:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:37:48.086150771 +0000 UTC m=+156.344671809" watchObservedRunningTime="2025-11-21 15:37:48.09163183 +0000 UTC 
m=+156.350152838" Nov 21 15:37:48 crc kubenswrapper[4967]: I1121 15:37:48.349555 4967 patch_prober.go:28] interesting pod/router-default-5444994796-gwx9x container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 15:37:48 crc kubenswrapper[4967]: [-]has-synced failed: reason withheld Nov 21 15:37:48 crc kubenswrapper[4967]: [+]process-running ok Nov 21 15:37:48 crc kubenswrapper[4967]: healthz check failed Nov 21 15:37:48 crc kubenswrapper[4967]: I1121 15:37:48.349625 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-gwx9x" podUID="d106a459-9dd6-4a10-b1e7-a2d8bb93ad6d" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 15:37:48 crc kubenswrapper[4967]: I1121 15:37:48.507975 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-dmk5z" Nov 21 15:37:49 crc kubenswrapper[4967]: I1121 15:37:49.071276 4967 generic.go:334] "Generic (PLEG): container finished" podID="efc2444b-afcd-4576-a510-6ebfcbdb7b01" containerID="af76e024cf11e439c663c1a6eeaee37d8ab097c00cf91f88cacbe450df0a6746" exitCode=0 Nov 21 15:37:49 crc kubenswrapper[4967]: I1121 15:37:49.071391 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"efc2444b-afcd-4576-a510-6ebfcbdb7b01","Type":"ContainerDied","Data":"af76e024cf11e439c663c1a6eeaee37d8ab097c00cf91f88cacbe450df0a6746"} Nov 21 15:37:49 crc kubenswrapper[4967]: I1121 15:37:49.346346 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-gwx9x" Nov 21 15:37:49 crc kubenswrapper[4967]: I1121 15:37:49.351094 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-gwx9x" Nov 21 15:37:51 crc kubenswrapper[4967]: I1121 15:37:51.913234 4967 patch_prober.go:28] interesting pod/console-f9d7485db-m45jq container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.7:8443/health\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Nov 21 15:37:51 crc kubenswrapper[4967]: I1121 15:37:51.913968 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-m45jq" podUID="daf11197-7c7a-4a0e-8c7d-de8047b53fe7" containerName="console" probeResult="failure" output="Get \"https://10.217.0.7:8443/health\": dial tcp 10.217.0.7:8443: connect: connection refused" Nov 21 15:37:52 crc kubenswrapper[4967]: I1121 15:37:52.595144 4967 patch_prober.go:28] interesting pod/downloads-7954f5f757-tdgx7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Nov 21 15:37:52 crc kubenswrapper[4967]: I1121 15:37:52.595218 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-tdgx7" podUID="4c12eeb4-d087-4e18-a9b0-0a2211a6128d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" Nov 21 15:37:52 crc kubenswrapper[4967]: I1121 15:37:52.595307 4967 patch_prober.go:28] interesting pod/downloads-7954f5f757-tdgx7 container/download-server namespace/openshift-console: 
Liveness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Nov 21 15:37:52 crc kubenswrapper[4967]: I1121 15:37:52.595387 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-tdgx7" podUID="4c12eeb4-d087-4e18-a9b0-0a2211a6128d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" Nov 21 15:37:58 crc kubenswrapper[4967]: I1121 15:37:58.739419 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs\") pod \"network-metrics-daemon-kj7qv\" (UID: \"e413228d-eaa3-45fb-8adf-35e0054bf53c\") " pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:37:58 crc kubenswrapper[4967]: I1121 15:37:58.750001 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e413228d-eaa3-45fb-8adf-35e0054bf53c-metrics-certs\") pod \"network-metrics-daemon-kj7qv\" (UID: \"e413228d-eaa3-45fb-8adf-35e0054bf53c\") " pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:37:58 crc kubenswrapper[4967]: I1121 15:37:58.855742 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-kj7qv" Nov 21 15:37:58 crc kubenswrapper[4967]: I1121 15:37:58.971962 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 15:37:59 crc kubenswrapper[4967]: I1121 15:37:59.132613 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"efc2444b-afcd-4576-a510-6ebfcbdb7b01","Type":"ContainerDied","Data":"d4a83c0618bc061d6be4ee88b1692a2872f7d7d66585ed0ce3f7e83f3e06e7bd"} Nov 21 15:37:59 crc kubenswrapper[4967]: I1121 15:37:59.132669 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d4a83c0618bc061d6be4ee88b1692a2872f7d7d66585ed0ce3f7e83f3e06e7bd" Nov 21 15:37:59 crc kubenswrapper[4967]: I1121 15:37:59.132684 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 15:37:59 crc kubenswrapper[4967]: I1121 15:37:59.149236 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/efc2444b-afcd-4576-a510-6ebfcbdb7b01-kubelet-dir\") pod \"efc2444b-afcd-4576-a510-6ebfcbdb7b01\" (UID: \"efc2444b-afcd-4576-a510-6ebfcbdb7b01\") " Nov 21 15:37:59 crc kubenswrapper[4967]: I1121 15:37:59.149373 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/efc2444b-afcd-4576-a510-6ebfcbdb7b01-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "efc2444b-afcd-4576-a510-6ebfcbdb7b01" (UID: "efc2444b-afcd-4576-a510-6ebfcbdb7b01"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:37:59 crc kubenswrapper[4967]: I1121 15:37:59.149389 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/efc2444b-afcd-4576-a510-6ebfcbdb7b01-kube-api-access\") pod \"efc2444b-afcd-4576-a510-6ebfcbdb7b01\" (UID: \"efc2444b-afcd-4576-a510-6ebfcbdb7b01\") " Nov 21 15:37:59 crc kubenswrapper[4967]: I1121 15:37:59.149741 4967 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/efc2444b-afcd-4576-a510-6ebfcbdb7b01-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 21 15:37:59 crc kubenswrapper[4967]: I1121 15:37:59.153231 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efc2444b-afcd-4576-a510-6ebfcbdb7b01-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "efc2444b-afcd-4576-a510-6ebfcbdb7b01" (UID: "efc2444b-afcd-4576-a510-6ebfcbdb7b01"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:37:59 crc kubenswrapper[4967]: I1121 15:37:59.251494 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/efc2444b-afcd-4576-a510-6ebfcbdb7b01-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:01 crc kubenswrapper[4967]: I1121 15:38:01.916873 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:38:01 crc kubenswrapper[4967]: I1121 15:38:01.920916 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:38:02 crc kubenswrapper[4967]: I1121 15:38:02.125638 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:38:02 crc kubenswrapper[4967]: I1121 15:38:02.601924 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-tdgx7" Nov 21 15:38:02 crc kubenswrapper[4967]: E1121 15:38:02.804473 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 21 15:38:02 crc kubenswrapper[4967]: E1121 15:38:02.804678 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f8vl5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-7kkh6_openshift-marketplace(dc581da3-1d2d-4d88-a2a8-6729abd4b955): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 15:38:02 crc kubenswrapper[4967]: E1121 15:38:02.805908 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-7kkh6" podUID="dc581da3-1d2d-4d88-a2a8-6729abd4b955" Nov 21 15:38:05 crc kubenswrapper[4967]: E1121 15:38:05.345302 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-7kkh6" podUID="dc581da3-1d2d-4d88-a2a8-6729abd4b955" Nov 21 15:38:05 crc kubenswrapper[4967]: I1121 15:38:05.774999 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-kj7qv"] Nov 21 15:38:05 crc kubenswrapper[4967]: W1121 15:38:05.796251 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode413228d_eaa3_45fb_8adf_35e0054bf53c.slice/crio-4f93028211ee6a1c00211f6a32680e70302016f095dc1175b6078b0498e964ba WatchSource:0}: Error finding container 4f93028211ee6a1c00211f6a32680e70302016f095dc1175b6078b0498e964ba: Status 404 returned error can't find the container with id 4f93028211ee6a1c00211f6a32680e70302016f095dc1175b6078b0498e964ba Nov 21 15:38:06 crc kubenswrapper[4967]: I1121 15:38:06.178656 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" event={"ID":"e413228d-eaa3-45fb-8adf-35e0054bf53c","Type":"ContainerStarted","Data":"4f93028211ee6a1c00211f6a32680e70302016f095dc1175b6078b0498e964ba"} Nov 21 15:38:08 crc kubenswrapper[4967]: E1121 15:38:08.062437 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = 
copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 21 15:38:08 crc kubenswrapper[4967]: E1121 15:38:08.063700 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bbch9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-dfgb9_openshift-marketplace(92e1234d-95c3-4198-9144-3d993ce2c551): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 15:38:08 crc kubenswrapper[4967]: E1121 15:38:08.064965 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-dfgb9" podUID="92e1234d-95c3-4198-9144-3d993ce2c551" Nov 21 15:38:08 crc kubenswrapper[4967]: I1121 15:38:08.190845 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" event={"ID":"e413228d-eaa3-45fb-8adf-35e0054bf53c","Type":"ContainerStarted","Data":"0d02cc98153901251f4d615c17cde1fbfd2011879dab6e6c420d8a7e7a0ca0c9"} Nov 21 15:38:08 crc kubenswrapper[4967]: E1121 15:38:08.192237 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-dfgb9" podUID="92e1234d-95c3-4198-9144-3d993ce2c551" Nov 21 15:38:09 crc kubenswrapper[4967]: I1121 15:38:09.200095 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-kj7qv" event={"ID":"e413228d-eaa3-45fb-8adf-35e0054bf53c","Type":"ContainerStarted","Data":"131b473c48296d573748678776c759d90aabf58b9895a2d8e9ded5e1c3556a95"} Nov 21 15:38:09 crc kubenswrapper[4967]: E1121 
15:38:09.746242 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 21 15:38:09 crc kubenswrapper[4967]: E1121 15:38:09.746697 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9gt9c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-m5b62_openshift-marketplace(aba8d061-5e25-4f5b-84ae-03d940117fe4): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 15:38:09 crc kubenswrapper[4967]: E1121 15:38:09.748002 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-m5b62" podUID="aba8d061-5e25-4f5b-84ae-03d940117fe4" Nov 21 15:38:10 crc kubenswrapper[4967]: I1121 15:38:10.226770 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-kj7qv" podStartSLOduration=154.226356259 podStartE2EDuration="2m34.226356259s" podCreationTimestamp="2025-11-21 15:35:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:38:10.22224882 +0000 UTC m=+178.480769828" watchObservedRunningTime="2025-11-21 15:38:10.226356259 +0000 UTC m=+178.484877287" Nov 21 15:38:10 crc kubenswrapper[4967]: E1121 15:38:10.434831 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 21 15:38:10 crc kubenswrapper[4967]: E1121 15:38:10.435057 4967 kuberuntime_manager.go:1274] 
"Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-h72rn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-w7s76_openshift-marketplace(eedebded-dcce-4646-837f-26b33ed68cfd): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 15:38:10 crc kubenswrapper[4967]: E1121 15:38:10.436274 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-w7s76" podUID="eedebded-dcce-4646-837f-26b33ed68cfd" Nov 21 15:38:13 crc kubenswrapper[4967]: I1121 15:38:13.440708 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8r5qd" Nov 21 15:38:14 crc kubenswrapper[4967]: E1121 15:38:14.122933 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-m5b62" podUID="aba8d061-5e25-4f5b-84ae-03d940117fe4" Nov 21 15:38:14 crc kubenswrapper[4967]: E1121 15:38:14.123117 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-w7s76" podUID="eedebded-dcce-4646-837f-26b33ed68cfd" Nov 21 15:38:16 crc kubenswrapper[4967]: I1121 15:38:16.522308 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: 
connect: connection refused" start-of-body= Nov 21 15:38:16 crc kubenswrapper[4967]: I1121 15:38:16.522411 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:38:18 crc kubenswrapper[4967]: E1121 15:38:18.774707 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 21 15:38:18 crc kubenswrapper[4967]: E1121 15:38:18.775187 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-h5fxj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-scgn5_openshift-marketplace(e056f496-158f-4033-8d35-a93e65357dbb): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 15:38:18 crc kubenswrapper[4967]: E1121 15:38:18.776435 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-scgn5" podUID="e056f496-158f-4033-8d35-a93e65357dbb" Nov 21 15:38:19 crc kubenswrapper[4967]: I1121 15:38:19.935061 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 15:38:21 crc kubenswrapper[4967]: E1121 15:38:21.535015 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" 
image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 21 15:38:21 crc kubenswrapper[4967]: E1121 15:38:21.535190 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8rljg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-48b7h_openshift-marketplace(b39663f2-10f5-47c8-817d-7667d49539a0): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 15:38:21 crc kubenswrapper[4967]: E1121 15:38:21.536391 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-48b7h" podUID="b39663f2-10f5-47c8-817d-7667d49539a0" Nov 21 15:38:22 crc kubenswrapper[4967]: E1121 15:38:22.758908 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-scgn5" podUID="e056f496-158f-4033-8d35-a93e65357dbb" Nov 21 15:38:22 crc kubenswrapper[4967]: E1121 15:38:22.758967 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-48b7h" podUID="b39663f2-10f5-47c8-817d-7667d49539a0" Nov 21 15:38:23 crc kubenswrapper[4967]: E1121 15:38:23.007299 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 21 15:38:23 crc kubenswrapper[4967]: E1121 15:38:23.008492 4967 
kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f2rml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-dmzs6_openshift-marketplace(0ceb149c-9ccd-44c5-b44d-6f63435abda7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 15:38:23 crc kubenswrapper[4967]: E1121 15:38:23.010060 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-dmzs6" podUID="0ceb149c-9ccd-44c5-b44d-6f63435abda7" Nov 21 15:38:23 crc kubenswrapper[4967]: E1121 15:38:23.276964 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-dmzs6" podUID="0ceb149c-9ccd-44c5-b44d-6f63435abda7" Nov 21 15:38:26 crc kubenswrapper[4967]: E1121 15:38:26.095005 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 21 15:38:26 crc kubenswrapper[4967]: E1121 15:38:26.095181 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lv72s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-rr6kq_openshift-marketplace(8caeadee-cb78-47a9-b93f-e4a8e270a952): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 15:38:26 crc kubenswrapper[4967]: E1121 15:38:26.096488 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-rr6kq" podUID="8caeadee-cb78-47a9-b93f-e4a8e270a952" Nov 21 15:38:26 crc kubenswrapper[4967]: E1121 15:38:26.291267 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-rr6kq" podUID="8caeadee-cb78-47a9-b93f-e4a8e270a952" Nov 21 15:38:27 crc kubenswrapper[4967]: I1121 15:38:27.299385 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfgb9" event={"ID":"92e1234d-95c3-4198-9144-3d993ce2c551","Type":"ContainerStarted","Data":"44ce792d3f7bfc16b4a116cfa2791d0b69505cfdbd8f68658c850125e6d0bf1e"} Nov 21 15:38:28 crc kubenswrapper[4967]: I1121 15:38:28.307638 4967 generic.go:334] "Generic (PLEG): container finished" podID="eedebded-dcce-4646-837f-26b33ed68cfd" containerID="8d302365c1c4dcddd284281d5b42fc5ba3590fdb93b322ae2ba62a299fcf3962" exitCode=0 Nov 21 15:38:28 crc kubenswrapper[4967]: I1121 15:38:28.307743 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w7s76" event={"ID":"eedebded-dcce-4646-837f-26b33ed68cfd","Type":"ContainerDied","Data":"8d302365c1c4dcddd284281d5b42fc5ba3590fdb93b322ae2ba62a299fcf3962"} Nov 21 15:38:28 crc kubenswrapper[4967]: I1121 15:38:28.314814 4967 generic.go:334] "Generic (PLEG): container finished" podID="92e1234d-95c3-4198-9144-3d993ce2c551" containerID="44ce792d3f7bfc16b4a116cfa2791d0b69505cfdbd8f68658c850125e6d0bf1e" exitCode=0 Nov 21 15:38:28 crc kubenswrapper[4967]: 
I1121 15:38:28.314960 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfgb9" event={"ID":"92e1234d-95c3-4198-9144-3d993ce2c551","Type":"ContainerDied","Data":"44ce792d3f7bfc16b4a116cfa2791d0b69505cfdbd8f68658c850125e6d0bf1e"} Nov 21 15:38:28 crc kubenswrapper[4967]: I1121 15:38:28.319986 4967 generic.go:334] "Generic (PLEG): container finished" podID="dc581da3-1d2d-4d88-a2a8-6729abd4b955" containerID="7cd2ce61556b1f11ea0d58a8f8461a134e10b8b2e0041dabe7f474c35a60b343" exitCode=0 Nov 21 15:38:28 crc kubenswrapper[4967]: I1121 15:38:28.320035 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7kkh6" event={"ID":"dc581da3-1d2d-4d88-a2a8-6729abd4b955","Type":"ContainerDied","Data":"7cd2ce61556b1f11ea0d58a8f8461a134e10b8b2e0041dabe7f474c35a60b343"} Nov 21 15:38:29 crc kubenswrapper[4967]: I1121 15:38:29.327631 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfgb9" event={"ID":"92e1234d-95c3-4198-9144-3d993ce2c551","Type":"ContainerStarted","Data":"a17893c9a577ee1ac421455e5b073e75768ffb9217e42261f2caea6dfde8fe97"} Nov 21 15:38:29 crc kubenswrapper[4967]: I1121 15:38:29.331746 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7kkh6" event={"ID":"dc581da3-1d2d-4d88-a2a8-6729abd4b955","Type":"ContainerStarted","Data":"6dc881fc9c643750b5192534b34103bc5e28440a103c56c2e4d28098e4928df5"} Nov 21 15:38:29 crc kubenswrapper[4967]: I1121 15:38:29.334852 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w7s76" event={"ID":"eedebded-dcce-4646-837f-26b33ed68cfd","Type":"ContainerStarted","Data":"7def8e915eb88fbf43a02194283fa6224cb8c298450445eaf79fc8d93b2bfa01"} Nov 21 15:38:29 crc kubenswrapper[4967]: I1121 15:38:29.346077 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-dfgb9" podStartSLOduration=2.361484231 podStartE2EDuration="48.346056275s" podCreationTimestamp="2025-11-21 15:37:41 +0000 UTC" firstStartedPulling="2025-11-21 15:37:42.879445596 +0000 UTC m=+151.137966604" lastFinishedPulling="2025-11-21 15:38:28.86401764 +0000 UTC m=+197.122538648" observedRunningTime="2025-11-21 15:38:29.343979545 +0000 UTC m=+197.602500573" watchObservedRunningTime="2025-11-21 15:38:29.346056275 +0000 UTC m=+197.604577273" Nov 21 15:38:29 crc kubenswrapper[4967]: I1121 15:38:29.371750 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-w7s76" podStartSLOduration=2.49318998 podStartE2EDuration="48.371725337s" podCreationTimestamp="2025-11-21 15:37:41 +0000 UTC" firstStartedPulling="2025-11-21 15:37:42.927443744 +0000 UTC m=+151.185964752" lastFinishedPulling="2025-11-21 15:38:28.805979101 +0000 UTC m=+197.064500109" observedRunningTime="2025-11-21 15:38:29.360649162 +0000 UTC m=+197.619170180" watchObservedRunningTime="2025-11-21 15:38:29.371725337 +0000 UTC m=+197.630246345" Nov 21 15:38:29 crc kubenswrapper[4967]: I1121 15:38:29.386100 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7kkh6" podStartSLOduration=3.415130862 podStartE2EDuration="49.386079177s" podCreationTimestamp="2025-11-21 15:37:40 +0000 UTC" firstStartedPulling="2025-11-21 15:37:42.884161634 +0000 UTC m=+151.142682652" lastFinishedPulling="2025-11-21 15:38:28.855109959 +0000 UTC 
m=+197.113630967" observedRunningTime="2025-11-21 15:38:29.383716208 +0000 UTC m=+197.642237216" watchObservedRunningTime="2025-11-21 15:38:29.386079177 +0000 UTC m=+197.644600185" Nov 21 15:38:31 crc kubenswrapper[4967]: I1121 15:38:31.347302 4967 generic.go:334] "Generic (PLEG): container finished" podID="aba8d061-5e25-4f5b-84ae-03d940117fe4" containerID="47e2658903171608718c007d8045157acee7296657216f638257e5aaf8a8c098" exitCode=0 Nov 21 15:38:31 crc kubenswrapper[4967]: I1121 15:38:31.347343 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m5b62" event={"ID":"aba8d061-5e25-4f5b-84ae-03d940117fe4","Type":"ContainerDied","Data":"47e2658903171608718c007d8045157acee7296657216f638257e5aaf8a8c098"} Nov 21 15:38:31 crc kubenswrapper[4967]: I1121 15:38:31.401248 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7kkh6" Nov 21 15:38:31 crc kubenswrapper[4967]: I1121 15:38:31.401296 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7kkh6" Nov 21 15:38:31 crc kubenswrapper[4967]: I1121 15:38:31.575791 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7kkh6" Nov 21 15:38:31 crc kubenswrapper[4967]: I1121 15:38:31.836116 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-w7s76" Nov 21 15:38:31 crc kubenswrapper[4967]: I1121 15:38:31.836545 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-w7s76" Nov 21 15:38:31 crc kubenswrapper[4967]: I1121 15:38:31.881886 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-w7s76" Nov 21 15:38:31 crc kubenswrapper[4967]: I1121 15:38:31.970397 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-dfgb9" Nov 21 15:38:31 crc kubenswrapper[4967]: I1121 15:38:31.970470 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-dfgb9" Nov 21 15:38:32 crc kubenswrapper[4967]: I1121 15:38:32.017713 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-dfgb9" Nov 21 15:38:32 crc kubenswrapper[4967]: I1121 15:38:32.357710 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m5b62" event={"ID":"aba8d061-5e25-4f5b-84ae-03d940117fe4","Type":"ContainerStarted","Data":"bf9c74d2fbaf1ca1ef06cb38f48b2d0654e7ebe07f319bf6d5f34fb5a05bf837"} Nov 21 15:38:32 crc kubenswrapper[4967]: I1121 15:38:32.382677 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-m5b62" podStartSLOduration=2.5116322540000002 podStartE2EDuration="51.382650744s" podCreationTimestamp="2025-11-21 15:37:41 +0000 UTC" firstStartedPulling="2025-11-21 15:37:42.887887502 +0000 UTC m=+151.146408510" lastFinishedPulling="2025-11-21 15:38:31.758905992 +0000 UTC m=+200.017427000" observedRunningTime="2025-11-21 15:38:32.381048968 +0000 UTC m=+200.639569976" watchObservedRunningTime="2025-11-21 15:38:32.382650744 +0000 UTC m=+200.641171752" Nov 21 15:38:34 crc kubenswrapper[4967]: I1121 15:38:34.370746 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-scgn5" event={"ID":"e056f496-158f-4033-8d35-a93e65357dbb","Type":"ContainerStarted","Data":"ff22e76b259042332e128313145e1dc1eabb19991f7a4bd5dad06f8fdf4e7182"} Nov 21 15:38:35 crc kubenswrapper[4967]: I1121 15:38:35.377696 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dmzs6" event={"ID":"0ceb149c-9ccd-44c5-b44d-6f63435abda7","Type":"ContainerStarted","Data":"691006760fcc19046907a566dd83a574c9369994aa34eaeff563397e777c2489"} Nov 21 15:38:35 crc kubenswrapper[4967]: I1121 15:38:35.379741 4967 generic.go:334] "Generic (PLEG): container finished" podID="e056f496-158f-4033-8d35-a93e65357dbb" containerID="ff22e76b259042332e128313145e1dc1eabb19991f7a4bd5dad06f8fdf4e7182" exitCode=0 Nov 21 15:38:35 crc kubenswrapper[4967]: I1121 15:38:35.379791 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-scgn5" event={"ID":"e056f496-158f-4033-8d35-a93e65357dbb","Type":"ContainerDied","Data":"ff22e76b259042332e128313145e1dc1eabb19991f7a4bd5dad06f8fdf4e7182"} Nov 21 15:38:36 crc kubenswrapper[4967]: I1121 15:38:36.387171 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-scgn5" event={"ID":"e056f496-158f-4033-8d35-a93e65357dbb","Type":"ContainerStarted","Data":"0ca93dff326f00afcb32ac31cfda6112b913aaa50be4ad4ffedc4f54737c6795"} Nov 21 15:38:36 crc kubenswrapper[4967]: I1121 15:38:36.388720 4967 generic.go:334] "Generic (PLEG): container finished" podID="0ceb149c-9ccd-44c5-b44d-6f63435abda7" containerID="691006760fcc19046907a566dd83a574c9369994aa34eaeff563397e777c2489" exitCode=0 Nov 21 15:38:36 crc kubenswrapper[4967]: I1121 15:38:36.388788 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dmzs6" event={"ID":"0ceb149c-9ccd-44c5-b44d-6f63435abda7","Type":"ContainerDied","Data":"691006760fcc19046907a566dd83a574c9369994aa34eaeff563397e777c2489"} Nov 21 15:38:36 crc kubenswrapper[4967]: I1121 15:38:36.406999 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-scgn5" podStartSLOduration=2.5265046289999997 podStartE2EDuration="53.406979378s" podCreationTimestamp="2025-11-21 15:37:43 +0000 UTC" firstStartedPulling="2025-11-21 15:37:44.994029457 +0000 UTC m=+153.252550465" lastFinishedPulling="2025-11-21 15:38:35.874504206 +0000 UTC m=+204.133025214" observedRunningTime="2025-11-21 15:38:36.406890185 +0000 UTC m=+204.665411193" watchObservedRunningTime="2025-11-21 15:38:36.406979378 +0000 UTC m=+204.665500386" Nov 21 15:38:37 crc kubenswrapper[4967]: I1121 15:38:37.395806 4967 generic.go:334] "Generic (PLEG): container finished" podID="b39663f2-10f5-47c8-817d-7667d49539a0" containerID="40a81b97b0024dd4ee856d7423e5a35f0bf7574fb530f4c39e8f58ea9a781079" exitCode=0 Nov 21 15:38:37 crc kubenswrapper[4967]: I1121 15:38:37.395903 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-48b7h" event={"ID":"b39663f2-10f5-47c8-817d-7667d49539a0","Type":"ContainerDied","Data":"40a81b97b0024dd4ee856d7423e5a35f0bf7574fb530f4c39e8f58ea9a781079"} Nov 21 15:38:37 crc kubenswrapper[4967]: I1121 15:38:37.404245 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dmzs6" event={"ID":"0ceb149c-9ccd-44c5-b44d-6f63435abda7","Type":"ContainerStarted","Data":"eafc47bd649b9f0b144d613960af8ce5a1e4aa11db30c988f714d4f1f6216216"} Nov 21 15:38:37 crc 
kubenswrapper[4967]: I1121 15:38:37.432977 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dmzs6" podStartSLOduration=4.669916448 podStartE2EDuration="53.432947389s" podCreationTimestamp="2025-11-21 15:37:44 +0000 UTC" firstStartedPulling="2025-11-21 15:37:48.045124656 +0000 UTC m=+156.303645664" lastFinishedPulling="2025-11-21 15:38:36.808155597 +0000 UTC m=+205.066676605" observedRunningTime="2025-11-21 15:38:37.428803638 +0000 UTC m=+205.687324656" watchObservedRunningTime="2025-11-21 15:38:37.432947389 +0000 UTC m=+205.691468417" Nov 21 15:38:38 crc kubenswrapper[4967]: I1121 15:38:38.411592 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-48b7h" event={"ID":"b39663f2-10f5-47c8-817d-7667d49539a0","Type":"ContainerStarted","Data":"59eb9d4818a2ba08609393163f3189cf2c608c0ff1b1903d145c67b7d904b227"} Nov 21 15:38:38 crc kubenswrapper[4967]: I1121 15:38:38.431560 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-48b7h" podStartSLOduration=3.574786312 podStartE2EDuration="56.431540141s" podCreationTimestamp="2025-11-21 15:37:42 +0000 UTC" firstStartedPulling="2025-11-21 15:37:44.996126449 +0000 UTC m=+153.254647457" lastFinishedPulling="2025-11-21 15:38:37.852880278 +0000 UTC m=+206.111401286" observedRunningTime="2025-11-21 15:38:38.429068169 +0000 UTC m=+206.687589177" watchObservedRunningTime="2025-11-21 15:38:38.431540141 +0000 UTC m=+206.690061149" Nov 21 15:38:41 crc kubenswrapper[4967]: I1121 15:38:41.445387 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7kkh6" Nov 21 15:38:41 crc kubenswrapper[4967]: I1121 15:38:41.791409 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-m5b62" Nov 21 15:38:41 crc kubenswrapper[4967]: I1121 15:38:41.791972 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-m5b62" Nov 21 15:38:41 crc kubenswrapper[4967]: I1121 15:38:41.830186 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-m5b62" Nov 21 15:38:41 crc kubenswrapper[4967]: I1121 15:38:41.874150 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-w7s76" Nov 21 15:38:42 crc kubenswrapper[4967]: I1121 15:38:42.014091 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-dfgb9" Nov 21 15:38:42 crc kubenswrapper[4967]: I1121 15:38:42.469243 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-m5b62" Nov 21 15:38:43 crc kubenswrapper[4967]: I1121 15:38:43.464417 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-48b7h" Nov 21 15:38:43 crc kubenswrapper[4967]: I1121 15:38:43.464882 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-48b7h" Nov 21 15:38:43 crc kubenswrapper[4967]: I1121 15:38:43.509211 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-48b7h" Nov 21 15:38:43 crc kubenswrapper[4967]: I1121 15:38:43.760571 4967 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-scgn5" Nov 21 15:38:43 crc kubenswrapper[4967]: I1121 15:38:43.760693 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-scgn5" Nov 21 15:38:43 crc kubenswrapper[4967]: I1121 15:38:43.805701 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-scgn5" Nov 21 15:38:44 crc kubenswrapper[4967]: I1121 15:38:44.444344 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rr6kq" event={"ID":"8caeadee-cb78-47a9-b93f-e4a8e270a952","Type":"ContainerStarted","Data":"29283dd15fdffde7fca61226c0b0b890f9b598bf5c548b44a16283c57707dbed"} Nov 21 15:38:44 crc kubenswrapper[4967]: I1121 15:38:44.493126 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-48b7h" Nov 21 15:38:44 crc kubenswrapper[4967]: I1121 15:38:44.494217 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-scgn5" Nov 21 15:38:44 crc kubenswrapper[4967]: I1121 15:38:44.526908 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-m5b62"] Nov 21 15:38:44 crc kubenswrapper[4967]: I1121 15:38:44.527143 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-m5b62" podUID="aba8d061-5e25-4f5b-84ae-03d940117fe4" containerName="registry-server" containerID="cri-o://bf9c74d2fbaf1ca1ef06cb38f48b2d0654e7ebe07f319bf6d5f34fb5a05bf837" gracePeriod=2 Nov 21 15:38:44 crc kubenswrapper[4967]: I1121 15:38:44.722129 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dfgb9"] Nov 21 15:38:44 crc kubenswrapper[4967]: I1121 15:38:44.722409 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-dfgb9" podUID="92e1234d-95c3-4198-9144-3d993ce2c551" containerName="registry-server" containerID="cri-o://a17893c9a577ee1ac421455e5b073e75768ffb9217e42261f2caea6dfde8fe97" gracePeriod=2 Nov 21 15:38:44 crc kubenswrapper[4967]: I1121 15:38:44.951152 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-m5b62" Nov 21 15:38:44 crc kubenswrapper[4967]: I1121 15:38:44.955589 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dmzs6" Nov 21 15:38:44 crc kubenswrapper[4967]: I1121 15:38:44.955641 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dmzs6" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.016118 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dmzs6" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.087654 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aba8d061-5e25-4f5b-84ae-03d940117fe4-catalog-content\") pod \"aba8d061-5e25-4f5b-84ae-03d940117fe4\" (UID: \"aba8d061-5e25-4f5b-84ae-03d940117fe4\") " Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.087784 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aba8d061-5e25-4f5b-84ae-03d940117fe4-utilities\") pod \"aba8d061-5e25-4f5b-84ae-03d940117fe4\" (UID: \"aba8d061-5e25-4f5b-84ae-03d940117fe4\") " Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.087844 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9gt9c\" (UniqueName: \"kubernetes.io/projected/aba8d061-5e25-4f5b-84ae-03d940117fe4-kube-api-access-9gt9c\") pod \"aba8d061-5e25-4f5b-84ae-03d940117fe4\" (UID: \"aba8d061-5e25-4f5b-84ae-03d940117fe4\") " Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.089289 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aba8d061-5e25-4f5b-84ae-03d940117fe4-utilities" (OuterVolumeSpecName: "utilities") pod "aba8d061-5e25-4f5b-84ae-03d940117fe4" (UID: "aba8d061-5e25-4f5b-84ae-03d940117fe4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.095748 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aba8d061-5e25-4f5b-84ae-03d940117fe4-kube-api-access-9gt9c" (OuterVolumeSpecName: "kube-api-access-9gt9c") pod "aba8d061-5e25-4f5b-84ae-03d940117fe4" (UID: "aba8d061-5e25-4f5b-84ae-03d940117fe4"). InnerVolumeSpecName "kube-api-access-9gt9c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.139104 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dfgb9" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.161387 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aba8d061-5e25-4f5b-84ae-03d940117fe4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "aba8d061-5e25-4f5b-84ae-03d940117fe4" (UID: "aba8d061-5e25-4f5b-84ae-03d940117fe4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.193654 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9gt9c\" (UniqueName: \"kubernetes.io/projected/aba8d061-5e25-4f5b-84ae-03d940117fe4-kube-api-access-9gt9c\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.193707 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aba8d061-5e25-4f5b-84ae-03d940117fe4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.193719 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aba8d061-5e25-4f5b-84ae-03d940117fe4-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.294966 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92e1234d-95c3-4198-9144-3d993ce2c551-utilities\") pod \"92e1234d-95c3-4198-9144-3d993ce2c551\" (UID: \"92e1234d-95c3-4198-9144-3d993ce2c551\") " Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.295012 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbch9\" (UniqueName: \"kubernetes.io/projected/92e1234d-95c3-4198-9144-3d993ce2c551-kube-api-access-bbch9\") pod \"92e1234d-95c3-4198-9144-3d993ce2c551\" (UID: \"92e1234d-95c3-4198-9144-3d993ce2c551\") " Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.295036 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92e1234d-95c3-4198-9144-3d993ce2c551-catalog-content\") pod \"92e1234d-95c3-4198-9144-3d993ce2c551\" (UID: \"92e1234d-95c3-4198-9144-3d993ce2c551\") " Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.296584 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92e1234d-95c3-4198-9144-3d993ce2c551-utilities" (OuterVolumeSpecName: "utilities") pod "92e1234d-95c3-4198-9144-3d993ce2c551" (UID: "92e1234d-95c3-4198-9144-3d993ce2c551"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.298678 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92e1234d-95c3-4198-9144-3d993ce2c551-kube-api-access-bbch9" (OuterVolumeSpecName: "kube-api-access-bbch9") pod "92e1234d-95c3-4198-9144-3d993ce2c551" (UID: "92e1234d-95c3-4198-9144-3d993ce2c551"). InnerVolumeSpecName "kube-api-access-bbch9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.350609 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92e1234d-95c3-4198-9144-3d993ce2c551-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "92e1234d-95c3-4198-9144-3d993ce2c551" (UID: "92e1234d-95c3-4198-9144-3d993ce2c551"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.396664 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92e1234d-95c3-4198-9144-3d993ce2c551-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.396895 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbch9\" (UniqueName: \"kubernetes.io/projected/92e1234d-95c3-4198-9144-3d993ce2c551-kube-api-access-bbch9\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.396910 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92e1234d-95c3-4198-9144-3d993ce2c551-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.450620 4967 generic.go:334] "Generic (PLEG): container finished" podID="92e1234d-95c3-4198-9144-3d993ce2c551" containerID="a17893c9a577ee1ac421455e5b073e75768ffb9217e42261f2caea6dfde8fe97" exitCode=0 Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.450699 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfgb9" event={"ID":"92e1234d-95c3-4198-9144-3d993ce2c551","Type":"ContainerDied","Data":"a17893c9a577ee1ac421455e5b073e75768ffb9217e42261f2caea6dfde8fe97"} Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.450759 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfgb9" event={"ID":"92e1234d-95c3-4198-9144-3d993ce2c551","Type":"ContainerDied","Data":"5002d11c262e98bc62c20c798a315350090c79841aa954a90790e0cb678e3088"} Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.450780 4967 scope.go:117] "RemoveContainer" containerID="a17893c9a577ee1ac421455e5b073e75768ffb9217e42261f2caea6dfde8fe97" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.451004 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dfgb9" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.454285 4967 generic.go:334] "Generic (PLEG): container finished" podID="aba8d061-5e25-4f5b-84ae-03d940117fe4" containerID="bf9c74d2fbaf1ca1ef06cb38f48b2d0654e7ebe07f319bf6d5f34fb5a05bf837" exitCode=0 Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.454397 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m5b62" event={"ID":"aba8d061-5e25-4f5b-84ae-03d940117fe4","Type":"ContainerDied","Data":"bf9c74d2fbaf1ca1ef06cb38f48b2d0654e7ebe07f319bf6d5f34fb5a05bf837"} Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.454428 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-m5b62" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.454475 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m5b62" event={"ID":"aba8d061-5e25-4f5b-84ae-03d940117fe4","Type":"ContainerDied","Data":"5a3db8d50000d88bae6107a82a605adf9a39f3f4e2645038230458fff1222d1f"} Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.456302 4967 generic.go:334] "Generic (PLEG): container finished" podID="8caeadee-cb78-47a9-b93f-e4a8e270a952" containerID="29283dd15fdffde7fca61226c0b0b890f9b598bf5c548b44a16283c57707dbed" exitCode=0 Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.456410 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rr6kq" event={"ID":"8caeadee-cb78-47a9-b93f-e4a8e270a952","Type":"ContainerDied","Data":"29283dd15fdffde7fca61226c0b0b890f9b598bf5c548b44a16283c57707dbed"} Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.472656 4967 scope.go:117] "RemoveContainer" containerID="44ce792d3f7bfc16b4a116cfa2791d0b69505cfdbd8f68658c850125e6d0bf1e" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.504890 4967 scope.go:117] "RemoveContainer" containerID="58b053a618d5fa706c414caace41257bae7dd7ec04872a7dc67087bab12ab376" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.505665 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dmzs6" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.506468 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dfgb9"] Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.517581 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-dfgb9"] Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.522671 4967 scope.go:117] "RemoveContainer" containerID="a17893c9a577ee1ac421455e5b073e75768ffb9217e42261f2caea6dfde8fe97" Nov 21 15:38:45 crc kubenswrapper[4967]: E1121 15:38:45.523194 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a17893c9a577ee1ac421455e5b073e75768ffb9217e42261f2caea6dfde8fe97\": container with ID starting with a17893c9a577ee1ac421455e5b073e75768ffb9217e42261f2caea6dfde8fe97 not found: ID does not exist" containerID="a17893c9a577ee1ac421455e5b073e75768ffb9217e42261f2caea6dfde8fe97" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.523280 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a17893c9a577ee1ac421455e5b073e75768ffb9217e42261f2caea6dfde8fe97"} err="failed to get container status \"a17893c9a577ee1ac421455e5b073e75768ffb9217e42261f2caea6dfde8fe97\": rpc error: code = NotFound desc = could not find container \"a17893c9a577ee1ac421455e5b073e75768ffb9217e42261f2caea6dfde8fe97\": container with ID starting with a17893c9a577ee1ac421455e5b073e75768ffb9217e42261f2caea6dfde8fe97 not found: ID does not exist" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.523406 4967 scope.go:117] "RemoveContainer" containerID="44ce792d3f7bfc16b4a116cfa2791d0b69505cfdbd8f68658c850125e6d0bf1e" Nov 21 15:38:45 crc kubenswrapper[4967]: E1121 15:38:45.523739 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44ce792d3f7bfc16b4a116cfa2791d0b69505cfdbd8f68658c850125e6d0bf1e\": container 
with ID starting with 44ce792d3f7bfc16b4a116cfa2791d0b69505cfdbd8f68658c850125e6d0bf1e not found: ID does not exist" containerID="44ce792d3f7bfc16b4a116cfa2791d0b69505cfdbd8f68658c850125e6d0bf1e" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.523790 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44ce792d3f7bfc16b4a116cfa2791d0b69505cfdbd8f68658c850125e6d0bf1e"} err="failed to get container status \"44ce792d3f7bfc16b4a116cfa2791d0b69505cfdbd8f68658c850125e6d0bf1e\": rpc error: code = NotFound desc = could not find container \"44ce792d3f7bfc16b4a116cfa2791d0b69505cfdbd8f68658c850125e6d0bf1e\": container with ID starting with 44ce792d3f7bfc16b4a116cfa2791d0b69505cfdbd8f68658c850125e6d0bf1e not found: ID does not exist" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.523825 4967 scope.go:117] "RemoveContainer" containerID="58b053a618d5fa706c414caace41257bae7dd7ec04872a7dc67087bab12ab376" Nov 21 15:38:45 crc kubenswrapper[4967]: E1121 15:38:45.524087 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"58b053a618d5fa706c414caace41257bae7dd7ec04872a7dc67087bab12ab376\": container with ID starting with 58b053a618d5fa706c414caace41257bae7dd7ec04872a7dc67087bab12ab376 not found: ID does not exist" containerID="58b053a618d5fa706c414caace41257bae7dd7ec04872a7dc67087bab12ab376" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.524170 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58b053a618d5fa706c414caace41257bae7dd7ec04872a7dc67087bab12ab376"} err="failed to get container status \"58b053a618d5fa706c414caace41257bae7dd7ec04872a7dc67087bab12ab376\": rpc error: code = NotFound desc = could not find container \"58b053a618d5fa706c414caace41257bae7dd7ec04872a7dc67087bab12ab376\": container with ID starting with 58b053a618d5fa706c414caace41257bae7dd7ec04872a7dc67087bab12ab376 not found: ID does not exist" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.524235 4967 scope.go:117] "RemoveContainer" containerID="bf9c74d2fbaf1ca1ef06cb38f48b2d0654e7ebe07f319bf6d5f34fb5a05bf837" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.524747 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-m5b62"] Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.532484 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-m5b62"] Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.539669 4967 scope.go:117] "RemoveContainer" containerID="47e2658903171608718c007d8045157acee7296657216f638257e5aaf8a8c098" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.552674 4967 scope.go:117] "RemoveContainer" containerID="094b233bd87376378c8543f28bd94301e92493ca508df396e7feb8d9ba83ab22" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.611532 4967 scope.go:117] "RemoveContainer" containerID="bf9c74d2fbaf1ca1ef06cb38f48b2d0654e7ebe07f319bf6d5f34fb5a05bf837" Nov 21 15:38:45 crc kubenswrapper[4967]: E1121 15:38:45.612069 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf9c74d2fbaf1ca1ef06cb38f48b2d0654e7ebe07f319bf6d5f34fb5a05bf837\": container with ID starting with bf9c74d2fbaf1ca1ef06cb38f48b2d0654e7ebe07f319bf6d5f34fb5a05bf837 not found: ID does not exist" containerID="bf9c74d2fbaf1ca1ef06cb38f48b2d0654e7ebe07f319bf6d5f34fb5a05bf837" 
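The runs of "RemoveContainer" / "ContainerStatus from runtime service failed" / "DeleteContainer returned error" entries around this point show the kubelet tolerating NotFound from the CRI runtime during cleanup: the errors are logged at info level and swallowed, because a container that no longer exists is exactly the desired end state of a delete. A minimal sketch of that idempotent-delete pattern against a gRPC runtime follows; the removeIfPresent helper and the remove callback are hypothetical illustrations, not the kubelet's actual code:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // removeIfPresent treats a NotFound answer from the runtime as
    // success: the container no longer exists, which is the desired
    // end state of a delete.
    func removeIfPresent(remove func(id string) error, id string) error {
        if err := remove(id); err != nil && status.Code(err) != codes.NotFound {
            return err // a real failure, not "already deleted"
        }
        return nil
    }

    func main() {
        gone := func(id string) error {
            return status.Error(codes.NotFound, "could not find container "+id)
        }
        fmt.Println(removeIfPresent(gone, "bf9c74d2")) // <nil>: treated as done
    }
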
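The pod_startup_latency_tracker entry just below for redhat-operators-rr6kq also encodes an identity worth making explicit: podStartSLOduration is the end-to-end startup time minus the image-pull window, i.e. 4.636977724s = 1m2.489891591s - 57.852913867s. A quick check of that arithmetic using the timestamps from the entry (a sketch under that assumption, not kubelet code):

    package main

    import (
        "fmt"
        "time"
    )

    // Verifies the pod_startup_latency_tracker numbers below:
    // SLO duration = end-to-end duration minus the image-pull window.
    func main() {
        const layout = "2006-01-02 15:04:05 -0700 MST"
        parse := func(s string) time.Time {
            t, err := time.Parse(layout, s) // fractional seconds in the input are accepted
            if err != nil {
                panic(err)
            }
            return t
        }
        created := parse("2025-11-21 15:37:44 +0000 UTC")
        running := parse("2025-11-21 15:38:46.489891591 +0000 UTC")
        pullStart := parse("2025-11-21 15:37:48.055527539 +0000 UTC")
        pullEnd := parse("2025-11-21 15:38:45.908441406 +0000 UTC")

        e2e := running.Sub(created)    // 1m2.489891591s (podStartE2EDuration)
        pull := pullEnd.Sub(pullStart) // 57.852913867s spent pulling the image
        fmt.Println(e2e - pull)        // 4.636977724s (podStartSLOduration)
    }
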
Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.612177 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf9c74d2fbaf1ca1ef06cb38f48b2d0654e7ebe07f319bf6d5f34fb5a05bf837"} err="failed to get container status \"bf9c74d2fbaf1ca1ef06cb38f48b2d0654e7ebe07f319bf6d5f34fb5a05bf837\": rpc error: code = NotFound desc = could not find container \"bf9c74d2fbaf1ca1ef06cb38f48b2d0654e7ebe07f319bf6d5f34fb5a05bf837\": container with ID starting with bf9c74d2fbaf1ca1ef06cb38f48b2d0654e7ebe07f319bf6d5f34fb5a05bf837 not found: ID does not exist" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.612277 4967 scope.go:117] "RemoveContainer" containerID="47e2658903171608718c007d8045157acee7296657216f638257e5aaf8a8c098" Nov 21 15:38:45 crc kubenswrapper[4967]: E1121 15:38:45.612904 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47e2658903171608718c007d8045157acee7296657216f638257e5aaf8a8c098\": container with ID starting with 47e2658903171608718c007d8045157acee7296657216f638257e5aaf8a8c098 not found: ID does not exist" containerID="47e2658903171608718c007d8045157acee7296657216f638257e5aaf8a8c098" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.612935 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47e2658903171608718c007d8045157acee7296657216f638257e5aaf8a8c098"} err="failed to get container status \"47e2658903171608718c007d8045157acee7296657216f638257e5aaf8a8c098\": rpc error: code = NotFound desc = could not find container \"47e2658903171608718c007d8045157acee7296657216f638257e5aaf8a8c098\": container with ID starting with 47e2658903171608718c007d8045157acee7296657216f638257e5aaf8a8c098 not found: ID does not exist" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.612959 4967 scope.go:117] "RemoveContainer" containerID="094b233bd87376378c8543f28bd94301e92493ca508df396e7feb8d9ba83ab22" Nov 21 15:38:45 crc kubenswrapper[4967]: E1121 15:38:45.613335 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"094b233bd87376378c8543f28bd94301e92493ca508df396e7feb8d9ba83ab22\": container with ID starting with 094b233bd87376378c8543f28bd94301e92493ca508df396e7feb8d9ba83ab22 not found: ID does not exist" containerID="094b233bd87376378c8543f28bd94301e92493ca508df396e7feb8d9ba83ab22" Nov 21 15:38:45 crc kubenswrapper[4967]: I1121 15:38:45.613381 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"094b233bd87376378c8543f28bd94301e92493ca508df396e7feb8d9ba83ab22"} err="failed to get container status \"094b233bd87376378c8543f28bd94301e92493ca508df396e7feb8d9ba83ab22\": rpc error: code = NotFound desc = could not find container \"094b233bd87376378c8543f28bd94301e92493ca508df396e7feb8d9ba83ab22\": container with ID starting with 094b233bd87376378c8543f28bd94301e92493ca508df396e7feb8d9ba83ab22 not found: ID does not exist" Nov 21 15:38:46 crc kubenswrapper[4967]: I1121 15:38:46.466806 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rr6kq" event={"ID":"8caeadee-cb78-47a9-b93f-e4a8e270a952","Type":"ContainerStarted","Data":"bb75e2cb0ec4d4b1540e63f62360d225e73aa38da442b7d496a406ffb577a2c4"} Nov 21 15:38:46 crc kubenswrapper[4967]: I1121 15:38:46.489909 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-rr6kq" podStartSLOduration=4.636977724 podStartE2EDuration="1m2.489891591s" podCreationTimestamp="2025-11-21 15:37:44 +0000 UTC" firstStartedPulling="2025-11-21 15:37:48.055527539 +0000 UTC m=+156.314048547" lastFinishedPulling="2025-11-21 15:38:45.908441406 +0000 UTC m=+214.166962414" observedRunningTime="2025-11-21 15:38:46.487615524 +0000 UTC m=+214.746136532" watchObservedRunningTime="2025-11-21 15:38:46.489891591 +0000 UTC m=+214.748412599" Nov 21 15:38:46 crc kubenswrapper[4967]: I1121 15:38:46.522360 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:38:46 crc kubenswrapper[4967]: I1121 15:38:46.522432 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:38:46 crc kubenswrapper[4967]: I1121 15:38:46.522494 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 15:38:46 crc kubenswrapper[4967]: I1121 15:38:46.523112 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 15:38:46 crc kubenswrapper[4967]: I1121 15:38:46.523171 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c" gracePeriod=600 Nov 21 15:38:46 crc kubenswrapper[4967]: I1121 15:38:46.545661 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92e1234d-95c3-4198-9144-3d993ce2c551" path="/var/lib/kubelet/pods/92e1234d-95c3-4198-9144-3d993ce2c551/volumes" Nov 21 15:38:46 crc kubenswrapper[4967]: I1121 15:38:46.546403 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aba8d061-5e25-4f5b-84ae-03d940117fe4" path="/var/lib/kubelet/pods/aba8d061-5e25-4f5b-84ae-03d940117fe4/volumes" Nov 21 15:38:46 crc kubenswrapper[4967]: I1121 15:38:46.923524 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-scgn5"] Nov 21 15:38:46 crc kubenswrapper[4967]: I1121 15:38:46.923765 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-scgn5" podUID="e056f496-158f-4033-8d35-a93e65357dbb" containerName="registry-server" containerID="cri-o://0ca93dff326f00afcb32ac31cfda6112b913aaa50be4ad4ffedc4f54737c6795" gracePeriod=2 Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.125752 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dmzs6"] Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.332808 4967 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-scgn5" Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.424703 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5fxj\" (UniqueName: \"kubernetes.io/projected/e056f496-158f-4033-8d35-a93e65357dbb-kube-api-access-h5fxj\") pod \"e056f496-158f-4033-8d35-a93e65357dbb\" (UID: \"e056f496-158f-4033-8d35-a93e65357dbb\") " Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.424780 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e056f496-158f-4033-8d35-a93e65357dbb-utilities\") pod \"e056f496-158f-4033-8d35-a93e65357dbb\" (UID: \"e056f496-158f-4033-8d35-a93e65357dbb\") " Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.424914 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e056f496-158f-4033-8d35-a93e65357dbb-catalog-content\") pod \"e056f496-158f-4033-8d35-a93e65357dbb\" (UID: \"e056f496-158f-4033-8d35-a93e65357dbb\") " Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.426136 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e056f496-158f-4033-8d35-a93e65357dbb-utilities" (OuterVolumeSpecName: "utilities") pod "e056f496-158f-4033-8d35-a93e65357dbb" (UID: "e056f496-158f-4033-8d35-a93e65357dbb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.431107 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e056f496-158f-4033-8d35-a93e65357dbb-kube-api-access-h5fxj" (OuterVolumeSpecName: "kube-api-access-h5fxj") pod "e056f496-158f-4033-8d35-a93e65357dbb" (UID: "e056f496-158f-4033-8d35-a93e65357dbb"). InnerVolumeSpecName "kube-api-access-h5fxj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.444030 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e056f496-158f-4033-8d35-a93e65357dbb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e056f496-158f-4033-8d35-a93e65357dbb" (UID: "e056f496-158f-4033-8d35-a93e65357dbb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.477592 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c" exitCode=0 Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.477655 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c"} Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.477715 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"77fede66988dd8e2022052388678be1ae75dcef265f91ac9300614230678fc4b"} Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.481786 4967 generic.go:334] "Generic (PLEG): container finished" podID="e056f496-158f-4033-8d35-a93e65357dbb" containerID="0ca93dff326f00afcb32ac31cfda6112b913aaa50be4ad4ffedc4f54737c6795" exitCode=0 Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.482069 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dmzs6" podUID="0ceb149c-9ccd-44c5-b44d-6f63435abda7" containerName="registry-server" containerID="cri-o://eafc47bd649b9f0b144d613960af8ce5a1e4aa11db30c988f714d4f1f6216216" gracePeriod=2 Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.482141 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-scgn5" Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.482071 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-scgn5" event={"ID":"e056f496-158f-4033-8d35-a93e65357dbb","Type":"ContainerDied","Data":"0ca93dff326f00afcb32ac31cfda6112b913aaa50be4ad4ffedc4f54737c6795"} Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.482594 4967 scope.go:117] "RemoveContainer" containerID="0ca93dff326f00afcb32ac31cfda6112b913aaa50be4ad4ffedc4f54737c6795" Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.482490 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-scgn5" event={"ID":"e056f496-158f-4033-8d35-a93e65357dbb","Type":"ContainerDied","Data":"e68aee7f462f4020994cb08e392527ce9a5534477edcfc1085118a6f2c4b6598"} Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.523434 4967 scope.go:117] "RemoveContainer" containerID="ff22e76b259042332e128313145e1dc1eabb19991f7a4bd5dad06f8fdf4e7182" Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.535080 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e056f496-158f-4033-8d35-a93e65357dbb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.535130 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5fxj\" (UniqueName: \"kubernetes.io/projected/e056f496-158f-4033-8d35-a93e65357dbb-kube-api-access-h5fxj\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.535146 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/e056f496-158f-4033-8d35-a93e65357dbb-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.535174 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-scgn5"] Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.544593 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-scgn5"] Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.623742 4967 scope.go:117] "RemoveContainer" containerID="b42be99dd718aa12e04941d91227d2d75054c32ce9b9a15202ed3a103f5de7d5" Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.642849 4967 scope.go:117] "RemoveContainer" containerID="0ca93dff326f00afcb32ac31cfda6112b913aaa50be4ad4ffedc4f54737c6795" Nov 21 15:38:47 crc kubenswrapper[4967]: E1121 15:38:47.643382 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ca93dff326f00afcb32ac31cfda6112b913aaa50be4ad4ffedc4f54737c6795\": container with ID starting with 0ca93dff326f00afcb32ac31cfda6112b913aaa50be4ad4ffedc4f54737c6795 not found: ID does not exist" containerID="0ca93dff326f00afcb32ac31cfda6112b913aaa50be4ad4ffedc4f54737c6795" Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.643419 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ca93dff326f00afcb32ac31cfda6112b913aaa50be4ad4ffedc4f54737c6795"} err="failed to get container status \"0ca93dff326f00afcb32ac31cfda6112b913aaa50be4ad4ffedc4f54737c6795\": rpc error: code = NotFound desc = could not find container \"0ca93dff326f00afcb32ac31cfda6112b913aaa50be4ad4ffedc4f54737c6795\": container with ID starting with 0ca93dff326f00afcb32ac31cfda6112b913aaa50be4ad4ffedc4f54737c6795 not found: ID does not exist" Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.643441 4967 scope.go:117] "RemoveContainer" containerID="ff22e76b259042332e128313145e1dc1eabb19991f7a4bd5dad06f8fdf4e7182" Nov 21 15:38:47 crc kubenswrapper[4967]: E1121 15:38:47.643758 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff22e76b259042332e128313145e1dc1eabb19991f7a4bd5dad06f8fdf4e7182\": container with ID starting with ff22e76b259042332e128313145e1dc1eabb19991f7a4bd5dad06f8fdf4e7182 not found: ID does not exist" containerID="ff22e76b259042332e128313145e1dc1eabb19991f7a4bd5dad06f8fdf4e7182" Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.643776 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff22e76b259042332e128313145e1dc1eabb19991f7a4bd5dad06f8fdf4e7182"} err="failed to get container status \"ff22e76b259042332e128313145e1dc1eabb19991f7a4bd5dad06f8fdf4e7182\": rpc error: code = NotFound desc = could not find container \"ff22e76b259042332e128313145e1dc1eabb19991f7a4bd5dad06f8fdf4e7182\": container with ID starting with ff22e76b259042332e128313145e1dc1eabb19991f7a4bd5dad06f8fdf4e7182 not found: ID does not exist" Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.643789 4967 scope.go:117] "RemoveContainer" containerID="b42be99dd718aa12e04941d91227d2d75054c32ce9b9a15202ed3a103f5de7d5" Nov 21 15:38:47 crc kubenswrapper[4967]: E1121 15:38:47.644031 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b42be99dd718aa12e04941d91227d2d75054c32ce9b9a15202ed3a103f5de7d5\": container 
with ID starting with b42be99dd718aa12e04941d91227d2d75054c32ce9b9a15202ed3a103f5de7d5 not found: ID does not exist" containerID="b42be99dd718aa12e04941d91227d2d75054c32ce9b9a15202ed3a103f5de7d5" Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.644052 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b42be99dd718aa12e04941d91227d2d75054c32ce9b9a15202ed3a103f5de7d5"} err="failed to get container status \"b42be99dd718aa12e04941d91227d2d75054c32ce9b9a15202ed3a103f5de7d5\": rpc error: code = NotFound desc = could not find container \"b42be99dd718aa12e04941d91227d2d75054c32ce9b9a15202ed3a103f5de7d5\": container with ID starting with b42be99dd718aa12e04941d91227d2d75054c32ce9b9a15202ed3a103f5de7d5 not found: ID does not exist" Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.820981 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dmzs6" Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.943355 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ceb149c-9ccd-44c5-b44d-6f63435abda7-utilities\") pod \"0ceb149c-9ccd-44c5-b44d-6f63435abda7\" (UID: \"0ceb149c-9ccd-44c5-b44d-6f63435abda7\") " Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.943436 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f2rml\" (UniqueName: \"kubernetes.io/projected/0ceb149c-9ccd-44c5-b44d-6f63435abda7-kube-api-access-f2rml\") pod \"0ceb149c-9ccd-44c5-b44d-6f63435abda7\" (UID: \"0ceb149c-9ccd-44c5-b44d-6f63435abda7\") " Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.943517 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ceb149c-9ccd-44c5-b44d-6f63435abda7-catalog-content\") pod \"0ceb149c-9ccd-44c5-b44d-6f63435abda7\" (UID: \"0ceb149c-9ccd-44c5-b44d-6f63435abda7\") " Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.944113 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0ceb149c-9ccd-44c5-b44d-6f63435abda7-utilities" (OuterVolumeSpecName: "utilities") pod "0ceb149c-9ccd-44c5-b44d-6f63435abda7" (UID: "0ceb149c-9ccd-44c5-b44d-6f63435abda7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:38:47 crc kubenswrapper[4967]: I1121 15:38:47.946185 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ceb149c-9ccd-44c5-b44d-6f63435abda7-kube-api-access-f2rml" (OuterVolumeSpecName: "kube-api-access-f2rml") pod "0ceb149c-9ccd-44c5-b44d-6f63435abda7" (UID: "0ceb149c-9ccd-44c5-b44d-6f63435abda7"). InnerVolumeSpecName "kube-api-access-f2rml". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.045442 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ceb149c-9ccd-44c5-b44d-6f63435abda7-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.045481 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f2rml\" (UniqueName: \"kubernetes.io/projected/0ceb149c-9ccd-44c5-b44d-6f63435abda7-kube-api-access-f2rml\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.051383 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0ceb149c-9ccd-44c5-b44d-6f63435abda7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0ceb149c-9ccd-44c5-b44d-6f63435abda7" (UID: "0ceb149c-9ccd-44c5-b44d-6f63435abda7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.146571 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ceb149c-9ccd-44c5-b44d-6f63435abda7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.495440 4967 generic.go:334] "Generic (PLEG): container finished" podID="0ceb149c-9ccd-44c5-b44d-6f63435abda7" containerID="eafc47bd649b9f0b144d613960af8ce5a1e4aa11db30c988f714d4f1f6216216" exitCode=0 Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.495558 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dmzs6" Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.495547 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dmzs6" event={"ID":"0ceb149c-9ccd-44c5-b44d-6f63435abda7","Type":"ContainerDied","Data":"eafc47bd649b9f0b144d613960af8ce5a1e4aa11db30c988f714d4f1f6216216"} Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.496144 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dmzs6" event={"ID":"0ceb149c-9ccd-44c5-b44d-6f63435abda7","Type":"ContainerDied","Data":"fc9b15a5d11db00bc9f86b1e0ee4b3077171b8d5b2c75e41c78c3553b9ea6c82"} Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.496171 4967 scope.go:117] "RemoveContainer" containerID="eafc47bd649b9f0b144d613960af8ce5a1e4aa11db30c988f714d4f1f6216216" Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.517823 4967 scope.go:117] "RemoveContainer" containerID="691006760fcc19046907a566dd83a574c9369994aa34eaeff563397e777c2489" Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.530448 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dmzs6"] Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.533147 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dmzs6"] Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.545138 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ceb149c-9ccd-44c5-b44d-6f63435abda7" path="/var/lib/kubelet/pods/0ceb149c-9ccd-44c5-b44d-6f63435abda7/volumes" Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.546082 4967 scope.go:117] "RemoveContainer" containerID="8728aa5f7a2dcac45c503ce90b28628e14b5a70f16f39a73b1b0e58d856320c7" Nov 21 15:38:48 crc 
kubenswrapper[4967]: I1121 15:38:48.546430 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e056f496-158f-4033-8d35-a93e65357dbb" path="/var/lib/kubelet/pods/e056f496-158f-4033-8d35-a93e65357dbb/volumes" Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.561948 4967 scope.go:117] "RemoveContainer" containerID="eafc47bd649b9f0b144d613960af8ce5a1e4aa11db30c988f714d4f1f6216216" Nov 21 15:38:48 crc kubenswrapper[4967]: E1121 15:38:48.562702 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eafc47bd649b9f0b144d613960af8ce5a1e4aa11db30c988f714d4f1f6216216\": container with ID starting with eafc47bd649b9f0b144d613960af8ce5a1e4aa11db30c988f714d4f1f6216216 not found: ID does not exist" containerID="eafc47bd649b9f0b144d613960af8ce5a1e4aa11db30c988f714d4f1f6216216" Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.562750 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eafc47bd649b9f0b144d613960af8ce5a1e4aa11db30c988f714d4f1f6216216"} err="failed to get container status \"eafc47bd649b9f0b144d613960af8ce5a1e4aa11db30c988f714d4f1f6216216\": rpc error: code = NotFound desc = could not find container \"eafc47bd649b9f0b144d613960af8ce5a1e4aa11db30c988f714d4f1f6216216\": container with ID starting with eafc47bd649b9f0b144d613960af8ce5a1e4aa11db30c988f714d4f1f6216216 not found: ID does not exist" Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.562781 4967 scope.go:117] "RemoveContainer" containerID="691006760fcc19046907a566dd83a574c9369994aa34eaeff563397e777c2489" Nov 21 15:38:48 crc kubenswrapper[4967]: E1121 15:38:48.563348 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"691006760fcc19046907a566dd83a574c9369994aa34eaeff563397e777c2489\": container with ID starting with 691006760fcc19046907a566dd83a574c9369994aa34eaeff563397e777c2489 not found: ID does not exist" containerID="691006760fcc19046907a566dd83a574c9369994aa34eaeff563397e777c2489" Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.563395 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"691006760fcc19046907a566dd83a574c9369994aa34eaeff563397e777c2489"} err="failed to get container status \"691006760fcc19046907a566dd83a574c9369994aa34eaeff563397e777c2489\": rpc error: code = NotFound desc = could not find container \"691006760fcc19046907a566dd83a574c9369994aa34eaeff563397e777c2489\": container with ID starting with 691006760fcc19046907a566dd83a574c9369994aa34eaeff563397e777c2489 not found: ID does not exist" Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.563433 4967 scope.go:117] "RemoveContainer" containerID="8728aa5f7a2dcac45c503ce90b28628e14b5a70f16f39a73b1b0e58d856320c7" Nov 21 15:38:48 crc kubenswrapper[4967]: E1121 15:38:48.563754 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8728aa5f7a2dcac45c503ce90b28628e14b5a70f16f39a73b1b0e58d856320c7\": container with ID starting with 8728aa5f7a2dcac45c503ce90b28628e14b5a70f16f39a73b1b0e58d856320c7 not found: ID does not exist" containerID="8728aa5f7a2dcac45c503ce90b28628e14b5a70f16f39a73b1b0e58d856320c7" Nov 21 15:38:48 crc kubenswrapper[4967]: I1121 15:38:48.563774 4967 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"8728aa5f7a2dcac45c503ce90b28628e14b5a70f16f39a73b1b0e58d856320c7"} err="failed to get container status \"8728aa5f7a2dcac45c503ce90b28628e14b5a70f16f39a73b1b0e58d856320c7\": rpc error: code = NotFound desc = could not find container \"8728aa5f7a2dcac45c503ce90b28628e14b5a70f16f39a73b1b0e58d856320c7\": container with ID starting with 8728aa5f7a2dcac45c503ce90b28628e14b5a70f16f39a73b1b0e58d856320c7 not found: ID does not exist" Nov 21 15:38:54 crc kubenswrapper[4967]: I1121 15:38:54.595465 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rr6kq" Nov 21 15:38:54 crc kubenswrapper[4967]: I1121 15:38:54.596256 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rr6kq" Nov 21 15:38:54 crc kubenswrapper[4967]: I1121 15:38:54.645128 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rr6kq" Nov 21 15:38:55 crc kubenswrapper[4967]: I1121 15:38:55.586149 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rr6kq" Nov 21 15:39:02 crc kubenswrapper[4967]: I1121 15:39:02.077376 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-lb8zd"] Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.104682 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" podUID="396d79a1-4427-49b2-b16e-89fb27df71ec" containerName="oauth-openshift" containerID="cri-o://e18033091d7908c99e475e3781d20a778b6e792966cbf1725262f770005e3fd2" gracePeriod=15 Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.493410 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.525417 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-54b5c98c4-fsvxn"] Nov 21 15:39:27 crc kubenswrapper[4967]: E1121 15:39:27.528423 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ceb149c-9ccd-44c5-b44d-6f63435abda7" containerName="registry-server" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528454 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ceb149c-9ccd-44c5-b44d-6f63435abda7" containerName="registry-server" Nov 21 15:39:27 crc kubenswrapper[4967]: E1121 15:39:27.528467 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99ae900c-764b-4b98-9fce-c259ef6fb004" containerName="pruner" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528474 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="99ae900c-764b-4b98-9fce-c259ef6fb004" containerName="pruner" Nov 21 15:39:27 crc kubenswrapper[4967]: E1121 15:39:27.528484 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aba8d061-5e25-4f5b-84ae-03d940117fe4" containerName="extract-content" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528490 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="aba8d061-5e25-4f5b-84ae-03d940117fe4" containerName="extract-content" Nov 21 15:39:27 crc kubenswrapper[4967]: E1121 15:39:27.528497 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aba8d061-5e25-4f5b-84ae-03d940117fe4" containerName="registry-server" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528503 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="aba8d061-5e25-4f5b-84ae-03d940117fe4" containerName="registry-server" Nov 21 15:39:27 crc kubenswrapper[4967]: E1121 15:39:27.528511 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92e1234d-95c3-4198-9144-3d993ce2c551" containerName="extract-content" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528517 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="92e1234d-95c3-4198-9144-3d993ce2c551" containerName="extract-content" Nov 21 15:39:27 crc kubenswrapper[4967]: E1121 15:39:27.528528 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="efc2444b-afcd-4576-a510-6ebfcbdb7b01" containerName="pruner" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528535 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="efc2444b-afcd-4576-a510-6ebfcbdb7b01" containerName="pruner" Nov 21 15:39:27 crc kubenswrapper[4967]: E1121 15:39:27.528546 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e056f496-158f-4033-8d35-a93e65357dbb" containerName="registry-server" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528552 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="e056f496-158f-4033-8d35-a93e65357dbb" containerName="registry-server" Nov 21 15:39:27 crc kubenswrapper[4967]: E1121 15:39:27.528562 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92e1234d-95c3-4198-9144-3d993ce2c551" containerName="registry-server" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528568 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="92e1234d-95c3-4198-9144-3d993ce2c551" containerName="registry-server" Nov 21 15:39:27 crc kubenswrapper[4967]: E1121 15:39:27.528575 4967 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="92e1234d-95c3-4198-9144-3d993ce2c551" containerName="extract-utilities" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528581 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="92e1234d-95c3-4198-9144-3d993ce2c551" containerName="extract-utilities" Nov 21 15:39:27 crc kubenswrapper[4967]: E1121 15:39:27.528587 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ceb149c-9ccd-44c5-b44d-6f63435abda7" containerName="extract-content" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528600 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ceb149c-9ccd-44c5-b44d-6f63435abda7" containerName="extract-content" Nov 21 15:39:27 crc kubenswrapper[4967]: E1121 15:39:27.528611 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e056f496-158f-4033-8d35-a93e65357dbb" containerName="extract-utilities" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528616 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="e056f496-158f-4033-8d35-a93e65357dbb" containerName="extract-utilities" Nov 21 15:39:27 crc kubenswrapper[4967]: E1121 15:39:27.528622 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ceb149c-9ccd-44c5-b44d-6f63435abda7" containerName="extract-utilities" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528628 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ceb149c-9ccd-44c5-b44d-6f63435abda7" containerName="extract-utilities" Nov 21 15:39:27 crc kubenswrapper[4967]: E1121 15:39:27.528638 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="396d79a1-4427-49b2-b16e-89fb27df71ec" containerName="oauth-openshift" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528644 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="396d79a1-4427-49b2-b16e-89fb27df71ec" containerName="oauth-openshift" Nov 21 15:39:27 crc kubenswrapper[4967]: E1121 15:39:27.528651 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aba8d061-5e25-4f5b-84ae-03d940117fe4" containerName="extract-utilities" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528658 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="aba8d061-5e25-4f5b-84ae-03d940117fe4" containerName="extract-utilities" Nov 21 15:39:27 crc kubenswrapper[4967]: E1121 15:39:27.528665 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e056f496-158f-4033-8d35-a93e65357dbb" containerName="extract-content" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528671 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="e056f496-158f-4033-8d35-a93e65357dbb" containerName="extract-content" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528783 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="92e1234d-95c3-4198-9144-3d993ce2c551" containerName="registry-server" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528795 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="efc2444b-afcd-4576-a510-6ebfcbdb7b01" containerName="pruner" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528810 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="396d79a1-4427-49b2-b16e-89fb27df71ec" containerName="oauth-openshift" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528818 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="99ae900c-764b-4b98-9fce-c259ef6fb004" containerName="pruner" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528827 4967 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="aba8d061-5e25-4f5b-84ae-03d940117fe4" containerName="registry-server" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528836 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="e056f496-158f-4033-8d35-a93e65357dbb" containerName="registry-server" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.528844 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ceb149c-9ccd-44c5-b44d-6f63435abda7" containerName="registry-server" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.529414 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.536038 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-54b5c98c4-fsvxn"] Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.626844 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/396d79a1-4427-49b2-b16e-89fb27df71ec-audit-dir\") pod \"396d79a1-4427-49b2-b16e-89fb27df71ec\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.626929 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/396d79a1-4427-49b2-b16e-89fb27df71ec-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "396d79a1-4427-49b2-b16e-89fb27df71ec" (UID: "396d79a1-4427-49b2-b16e-89fb27df71ec"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.626940 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-template-login\") pod \"396d79a1-4427-49b2-b16e-89fb27df71ec\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627001 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-ocp-branding-template\") pod \"396d79a1-4427-49b2-b16e-89fb27df71ec\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627031 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-service-ca\") pod \"396d79a1-4427-49b2-b16e-89fb27df71ec\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627064 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-trusted-ca-bundle\") pod \"396d79a1-4427-49b2-b16e-89fb27df71ec\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627109 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-audit-policies\") pod \"396d79a1-4427-49b2-b16e-89fb27df71ec\" (UID: 
\"396d79a1-4427-49b2-b16e-89fb27df71ec\") " Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627131 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-serving-cert\") pod \"396d79a1-4427-49b2-b16e-89fb27df71ec\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627156 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-cliconfig\") pod \"396d79a1-4427-49b2-b16e-89fb27df71ec\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627185 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-template-error\") pod \"396d79a1-4427-49b2-b16e-89fb27df71ec\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627233 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcf2z\" (UniqueName: \"kubernetes.io/projected/396d79a1-4427-49b2-b16e-89fb27df71ec-kube-api-access-mcf2z\") pod \"396d79a1-4427-49b2-b16e-89fb27df71ec\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627260 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-template-provider-selection\") pod \"396d79a1-4427-49b2-b16e-89fb27df71ec\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627284 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-router-certs\") pod \"396d79a1-4427-49b2-b16e-89fb27df71ec\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627307 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-session\") pod \"396d79a1-4427-49b2-b16e-89fb27df71ec\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627351 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-idp-0-file-data\") pod \"396d79a1-4427-49b2-b16e-89fb27df71ec\" (UID: \"396d79a1-4427-49b2-b16e-89fb27df71ec\") " Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627528 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-serving-cert\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " 
pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627558 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627580 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/48b89917-7db7-4e08-9d58-be37ac9b3e88-audit-policies\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627604 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-service-ca\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627634 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-session\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627659 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t5x6l\" (UniqueName: \"kubernetes.io/projected/48b89917-7db7-4e08-9d58-be37ac9b3e88-kube-api-access-t5x6l\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627681 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-user-template-login\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627712 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-cliconfig\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627737 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: 
\"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627766 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627799 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-router-certs\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627825 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627849 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-user-template-error\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627879 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "396d79a1-4427-49b2-b16e-89fb27df71ec" (UID: "396d79a1-4427-49b2-b16e-89fb27df71ec"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627892 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/48b89917-7db7-4e08-9d58-be37ac9b3e88-audit-dir\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627941 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "396d79a1-4427-49b2-b16e-89fb27df71ec" (UID: "396d79a1-4427-49b2-b16e-89fb27df71ec"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.627966 4967 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/396d79a1-4427-49b2-b16e-89fb27df71ec-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.628017 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.628259 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "396d79a1-4427-49b2-b16e-89fb27df71ec" (UID: "396d79a1-4427-49b2-b16e-89fb27df71ec"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.628355 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "396d79a1-4427-49b2-b16e-89fb27df71ec" (UID: "396d79a1-4427-49b2-b16e-89fb27df71ec"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.633336 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "396d79a1-4427-49b2-b16e-89fb27df71ec" (UID: "396d79a1-4427-49b2-b16e-89fb27df71ec"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.633622 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "396d79a1-4427-49b2-b16e-89fb27df71ec" (UID: "396d79a1-4427-49b2-b16e-89fb27df71ec"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.633978 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "396d79a1-4427-49b2-b16e-89fb27df71ec" (UID: "396d79a1-4427-49b2-b16e-89fb27df71ec"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.636360 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/396d79a1-4427-49b2-b16e-89fb27df71ec-kube-api-access-mcf2z" (OuterVolumeSpecName: "kube-api-access-mcf2z") pod "396d79a1-4427-49b2-b16e-89fb27df71ec" (UID: "396d79a1-4427-49b2-b16e-89fb27df71ec"). InnerVolumeSpecName "kube-api-access-mcf2z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.637406 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "396d79a1-4427-49b2-b16e-89fb27df71ec" (UID: "396d79a1-4427-49b2-b16e-89fb27df71ec"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.639576 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "396d79a1-4427-49b2-b16e-89fb27df71ec" (UID: "396d79a1-4427-49b2-b16e-89fb27df71ec"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.641785 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "396d79a1-4427-49b2-b16e-89fb27df71ec" (UID: "396d79a1-4427-49b2-b16e-89fb27df71ec"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.642001 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "396d79a1-4427-49b2-b16e-89fb27df71ec" (UID: "396d79a1-4427-49b2-b16e-89fb27df71ec"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.642233 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "396d79a1-4427-49b2-b16e-89fb27df71ec" (UID: "396d79a1-4427-49b2-b16e-89fb27df71ec"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.728737 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/48b89917-7db7-4e08-9d58-be37ac9b3e88-audit-dir\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.728785 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-serving-cert\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.728807 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.728829 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/48b89917-7db7-4e08-9d58-be37ac9b3e88-audit-policies\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.728849 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-service-ca\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.728873 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-session\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.728897 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t5x6l\" (UniqueName: \"kubernetes.io/projected/48b89917-7db7-4e08-9d58-be37ac9b3e88-kube-api-access-t5x6l\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.728917 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-user-template-login\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 
Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.728969 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn"
Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.728993 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn"
Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.729016 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-router-certs\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn"
Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.729036 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn"
Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.729057 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-user-template-error\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn"
Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.729098 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.729111 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.729123 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
\"\"" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.729135 4967 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.729146 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.729156 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.729165 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.729174 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcf2z\" (UniqueName: \"kubernetes.io/projected/396d79a1-4427-49b2-b16e-89fb27df71ec-kube-api-access-mcf2z\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.729184 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.729194 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.729204 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.729213 4967 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/396d79a1-4427-49b2-b16e-89fb27df71ec-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.728825 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/48b89917-7db7-4e08-9d58-be37ac9b3e88-audit-dir\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.730087 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/48b89917-7db7-4e08-9d58-be37ac9b3e88-audit-policies\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.730156 4967 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-service-ca\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.732057 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.732118 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-cliconfig\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.733563 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-user-template-error\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.734149 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-user-template-login\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.734162 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-router-certs\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.734706 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.734989 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.735067 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-serving-cert\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.735252 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-system-session\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.735590 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/48b89917-7db7-4e08-9d58-be37ac9b3e88-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.744941 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t5x6l\" (UniqueName: \"kubernetes.io/projected/48b89917-7db7-4e08-9d58-be37ac9b3e88-kube-api-access-t5x6l\") pod \"oauth-openshift-54b5c98c4-fsvxn\" (UID: \"48b89917-7db7-4e08-9d58-be37ac9b3e88\") " pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.760944 4967 generic.go:334] "Generic (PLEG): container finished" podID="396d79a1-4427-49b2-b16e-89fb27df71ec" containerID="e18033091d7908c99e475e3781d20a778b6e792966cbf1725262f770005e3fd2" exitCode=0 Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.761007 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" event={"ID":"396d79a1-4427-49b2-b16e-89fb27df71ec","Type":"ContainerDied","Data":"e18033091d7908c99e475e3781d20a778b6e792966cbf1725262f770005e3fd2"} Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.761052 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" event={"ID":"396d79a1-4427-49b2-b16e-89fb27df71ec","Type":"ContainerDied","Data":"8b75d047d039c13a59c4f0500a4e00bc8007fd245afd8e2482178dc5748ac70c"} Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.761091 4967 scope.go:117] "RemoveContainer" containerID="e18033091d7908c99e475e3781d20a778b6e792966cbf1725262f770005e3fd2" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.761022 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-lb8zd" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.786088 4967 scope.go:117] "RemoveContainer" containerID="e18033091d7908c99e475e3781d20a778b6e792966cbf1725262f770005e3fd2" Nov 21 15:39:27 crc kubenswrapper[4967]: E1121 15:39:27.786626 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e18033091d7908c99e475e3781d20a778b6e792966cbf1725262f770005e3fd2\": container with ID starting with e18033091d7908c99e475e3781d20a778b6e792966cbf1725262f770005e3fd2 not found: ID does not exist" containerID="e18033091d7908c99e475e3781d20a778b6e792966cbf1725262f770005e3fd2" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.786680 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e18033091d7908c99e475e3781d20a778b6e792966cbf1725262f770005e3fd2"} err="failed to get container status \"e18033091d7908c99e475e3781d20a778b6e792966cbf1725262f770005e3fd2\": rpc error: code = NotFound desc = could not find container \"e18033091d7908c99e475e3781d20a778b6e792966cbf1725262f770005e3fd2\": container with ID starting with e18033091d7908c99e475e3781d20a778b6e792966cbf1725262f770005e3fd2 not found: ID does not exist" Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.798625 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-lb8zd"] Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.802724 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-lb8zd"] Nov 21 15:39:27 crc kubenswrapper[4967]: I1121 15:39:27.856047 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:28 crc kubenswrapper[4967]: I1121 15:39:28.232879 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-54b5c98c4-fsvxn"] Nov 21 15:39:28 crc kubenswrapper[4967]: I1121 15:39:28.543904 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="396d79a1-4427-49b2-b16e-89fb27df71ec" path="/var/lib/kubelet/pods/396d79a1-4427-49b2-b16e-89fb27df71ec/volumes" Nov 21 15:39:28 crc kubenswrapper[4967]: I1121 15:39:28.768909 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" event={"ID":"48b89917-7db7-4e08-9d58-be37ac9b3e88","Type":"ContainerStarted","Data":"a1fd9fce2958303098bb9a3130aa706ea4bdd4e9483db061be353480949614f2"} Nov 21 15:39:28 crc kubenswrapper[4967]: I1121 15:39:28.768992 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" event={"ID":"48b89917-7db7-4e08-9d58-be37ac9b3e88","Type":"ContainerStarted","Data":"33f31e1ef3802789b909d1fe7bfbb86b72ff4d387bce59e48f49c5132a1dc38f"} Nov 21 15:39:28 crc kubenswrapper[4967]: I1121 15:39:28.769252 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:28 crc kubenswrapper[4967]: I1121 15:39:28.792102 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" podStartSLOduration=26.792078894 podStartE2EDuration="26.792078894s" podCreationTimestamp="2025-11-21 15:39:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:39:28.791273671 +0000 UTC m=+257.049794699" watchObservedRunningTime="2025-11-21 15:39:28.792078894 +0000 UTC m=+257.050599902" Nov 21 15:39:29 crc kubenswrapper[4967]: I1121 15:39:29.074497 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-54b5c98c4-fsvxn" Nov 21 15:39:45 crc kubenswrapper[4967]: I1121 15:39:45.769323 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7kkh6"] Nov 21 15:39:45 crc kubenswrapper[4967]: I1121 15:39:45.771674 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7kkh6" podUID="dc581da3-1d2d-4d88-a2a8-6729abd4b955" containerName="registry-server" containerID="cri-o://6dc881fc9c643750b5192534b34103bc5e28440a103c56c2e4d28098e4928df5" gracePeriod=30 Nov 21 15:39:45 crc kubenswrapper[4967]: I1121 15:39:45.777131 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-w7s76"] Nov 21 15:39:45 crc kubenswrapper[4967]: I1121 15:39:45.777492 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-w7s76" podUID="eedebded-dcce-4646-837f-26b33ed68cfd" containerName="registry-server" containerID="cri-o://7def8e915eb88fbf43a02194283fa6224cb8c298450445eaf79fc8d93b2bfa01" gracePeriod=30 Nov 21 15:39:45 crc kubenswrapper[4967]: I1121 15:39:45.793092 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ql9tj"] Nov 21 15:39:45 crc kubenswrapper[4967]: I1121 15:39:45.793345 4967 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj" podUID="4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7" containerName="marketplace-operator" containerID="cri-o://21d74f30a81d3a5b02d4b2f4279acd97030b4cdc71e86cd7f3717a1601757c53" gracePeriod=30 Nov 21 15:39:45 crc kubenswrapper[4967]: I1121 15:39:45.807412 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-xntrp"] Nov 21 15:39:45 crc kubenswrapper[4967]: I1121 15:39:45.808242 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-xntrp" Nov 21 15:39:45 crc kubenswrapper[4967]: I1121 15:39:45.810660 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-48b7h"] Nov 21 15:39:45 crc kubenswrapper[4967]: I1121 15:39:45.810985 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-48b7h" podUID="b39663f2-10f5-47c8-817d-7667d49539a0" containerName="registry-server" containerID="cri-o://59eb9d4818a2ba08609393163f3189cf2c608c0ff1b1903d145c67b7d904b227" gracePeriod=30 Nov 21 15:39:45 crc kubenswrapper[4967]: I1121 15:39:45.817411 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rr6kq"] Nov 21 15:39:45 crc kubenswrapper[4967]: I1121 15:39:45.817690 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rr6kq" podUID="8caeadee-cb78-47a9-b93f-e4a8e270a952" containerName="registry-server" containerID="cri-o://bb75e2cb0ec4d4b1540e63f62360d225e73aa38da442b7d496a406ffb577a2c4" gracePeriod=30 Nov 21 15:39:45 crc kubenswrapper[4967]: I1121 15:39:45.822707 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-xntrp"] Nov 21 15:39:45 crc kubenswrapper[4967]: I1121 15:39:45.998270 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/34e31926-b4b0-4c27-b2e9-8825d80a21f9-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-xntrp\" (UID: \"34e31926-b4b0-4c27-b2e9-8825d80a21f9\") " pod="openshift-marketplace/marketplace-operator-79b997595-xntrp" Nov 21 15:39:45 crc kubenswrapper[4967]: I1121 15:39:45.998380 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnvvj\" (UniqueName: \"kubernetes.io/projected/34e31926-b4b0-4c27-b2e9-8825d80a21f9-kube-api-access-jnvvj\") pod \"marketplace-operator-79b997595-xntrp\" (UID: \"34e31926-b4b0-4c27-b2e9-8825d80a21f9\") " pod="openshift-marketplace/marketplace-operator-79b997595-xntrp" Nov 21 15:39:45 crc kubenswrapper[4967]: I1121 15:39:45.998452 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/34e31926-b4b0-4c27-b2e9-8825d80a21f9-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-xntrp\" (UID: \"34e31926-b4b0-4c27-b2e9-8825d80a21f9\") " pod="openshift-marketplace/marketplace-operator-79b997595-xntrp" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.100073 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/34e31926-b4b0-4c27-b2e9-8825d80a21f9-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-xntrp\" (UID: \"34e31926-b4b0-4c27-b2e9-8825d80a21f9\") " pod="openshift-marketplace/marketplace-operator-79b997595-xntrp" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.100168 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnvvj\" (UniqueName: \"kubernetes.io/projected/34e31926-b4b0-4c27-b2e9-8825d80a21f9-kube-api-access-jnvvj\") pod \"marketplace-operator-79b997595-xntrp\" (UID: \"34e31926-b4b0-4c27-b2e9-8825d80a21f9\") " pod="openshift-marketplace/marketplace-operator-79b997595-xntrp" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.100261 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/34e31926-b4b0-4c27-b2e9-8825d80a21f9-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-xntrp\" (UID: \"34e31926-b4b0-4c27-b2e9-8825d80a21f9\") " pod="openshift-marketplace/marketplace-operator-79b997595-xntrp" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.102454 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/34e31926-b4b0-4c27-b2e9-8825d80a21f9-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-xntrp\" (UID: \"34e31926-b4b0-4c27-b2e9-8825d80a21f9\") " pod="openshift-marketplace/marketplace-operator-79b997595-xntrp" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.108254 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/34e31926-b4b0-4c27-b2e9-8825d80a21f9-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-xntrp\" (UID: \"34e31926-b4b0-4c27-b2e9-8825d80a21f9\") " pod="openshift-marketplace/marketplace-operator-79b997595-xntrp" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.117386 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jnvvj\" (UniqueName: \"kubernetes.io/projected/34e31926-b4b0-4c27-b2e9-8825d80a21f9-kube-api-access-jnvvj\") pod \"marketplace-operator-79b997595-xntrp\" (UID: \"34e31926-b4b0-4c27-b2e9-8825d80a21f9\") " pod="openshift-marketplace/marketplace-operator-79b997595-xntrp" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.155897 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-xntrp" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.256591 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7kkh6" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.305484 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc581da3-1d2d-4d88-a2a8-6729abd4b955-utilities\") pod \"dc581da3-1d2d-4d88-a2a8-6729abd4b955\" (UID: \"dc581da3-1d2d-4d88-a2a8-6729abd4b955\") " Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.305612 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc581da3-1d2d-4d88-a2a8-6729abd4b955-catalog-content\") pod \"dc581da3-1d2d-4d88-a2a8-6729abd4b955\" (UID: \"dc581da3-1d2d-4d88-a2a8-6729abd4b955\") " Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.305642 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8vl5\" (UniqueName: \"kubernetes.io/projected/dc581da3-1d2d-4d88-a2a8-6729abd4b955-kube-api-access-f8vl5\") pod \"dc581da3-1d2d-4d88-a2a8-6729abd4b955\" (UID: \"dc581da3-1d2d-4d88-a2a8-6729abd4b955\") " Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.309264 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc581da3-1d2d-4d88-a2a8-6729abd4b955-utilities" (OuterVolumeSpecName: "utilities") pod "dc581da3-1d2d-4d88-a2a8-6729abd4b955" (UID: "dc581da3-1d2d-4d88-a2a8-6729abd4b955"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.311506 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc581da3-1d2d-4d88-a2a8-6729abd4b955-kube-api-access-f8vl5" (OuterVolumeSpecName: "kube-api-access-f8vl5") pod "dc581da3-1d2d-4d88-a2a8-6729abd4b955" (UID: "dc581da3-1d2d-4d88-a2a8-6729abd4b955"). InnerVolumeSpecName "kube-api-access-f8vl5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.327891 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-48b7h" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.330668 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.338464 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rr6kq" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.353822 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-w7s76" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.360947 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc581da3-1d2d-4d88-a2a8-6729abd4b955-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dc581da3-1d2d-4d88-a2a8-6729abd4b955" (UID: "dc581da3-1d2d-4d88-a2a8-6729abd4b955"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.406828 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-marketplace-trusted-ca\") pod \"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7\" (UID: \"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7\") " Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.406888 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-marketplace-operator-metrics\") pod \"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7\" (UID: \"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7\") " Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.406915 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lv72s\" (UniqueName: \"kubernetes.io/projected/8caeadee-cb78-47a9-b93f-e4a8e270a952-kube-api-access-lv72s\") pod \"8caeadee-cb78-47a9-b93f-e4a8e270a952\" (UID: \"8caeadee-cb78-47a9-b93f-e4a8e270a952\") " Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.406935 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h72rn\" (UniqueName: \"kubernetes.io/projected/eedebded-dcce-4646-837f-26b33ed68cfd-kube-api-access-h72rn\") pod \"eedebded-dcce-4646-837f-26b33ed68cfd\" (UID: \"eedebded-dcce-4646-837f-26b33ed68cfd\") " Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.406960 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eedebded-dcce-4646-837f-26b33ed68cfd-catalog-content\") pod \"eedebded-dcce-4646-837f-26b33ed68cfd\" (UID: \"eedebded-dcce-4646-837f-26b33ed68cfd\") " Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.406981 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8caeadee-cb78-47a9-b93f-e4a8e270a952-utilities\") pod \"8caeadee-cb78-47a9-b93f-e4a8e270a952\" (UID: \"8caeadee-cb78-47a9-b93f-e4a8e270a952\") " Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.407008 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eedebded-dcce-4646-837f-26b33ed68cfd-utilities\") pod \"eedebded-dcce-4646-837f-26b33ed68cfd\" (UID: \"eedebded-dcce-4646-837f-26b33ed68cfd\") " Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.407037 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk2sx\" (UniqueName: \"kubernetes.io/projected/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-kube-api-access-tk2sx\") pod \"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7\" (UID: \"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7\") " Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.407059 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8caeadee-cb78-47a9-b93f-e4a8e270a952-catalog-content\") pod \"8caeadee-cb78-47a9-b93f-e4a8e270a952\" (UID: \"8caeadee-cb78-47a9-b93f-e4a8e270a952\") " Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.407077 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/b39663f2-10f5-47c8-817d-7667d49539a0-utilities\") pod \"b39663f2-10f5-47c8-817d-7667d49539a0\" (UID: \"b39663f2-10f5-47c8-817d-7667d49539a0\") " Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.407094 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b39663f2-10f5-47c8-817d-7667d49539a0-catalog-content\") pod \"b39663f2-10f5-47c8-817d-7667d49539a0\" (UID: \"b39663f2-10f5-47c8-817d-7667d49539a0\") " Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.407111 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rljg\" (UniqueName: \"kubernetes.io/projected/b39663f2-10f5-47c8-817d-7667d49539a0-kube-api-access-8rljg\") pod \"b39663f2-10f5-47c8-817d-7667d49539a0\" (UID: \"b39663f2-10f5-47c8-817d-7667d49539a0\") " Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.407261 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc581da3-1d2d-4d88-a2a8-6729abd4b955-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.407273 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8vl5\" (UniqueName: \"kubernetes.io/projected/dc581da3-1d2d-4d88-a2a8-6729abd4b955-kube-api-access-f8vl5\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.407283 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc581da3-1d2d-4d88-a2a8-6729abd4b955-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.407636 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7" (UID: "4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.411048 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eedebded-dcce-4646-837f-26b33ed68cfd-kube-api-access-h72rn" (OuterVolumeSpecName: "kube-api-access-h72rn") pod "eedebded-dcce-4646-837f-26b33ed68cfd" (UID: "eedebded-dcce-4646-837f-26b33ed68cfd"). InnerVolumeSpecName "kube-api-access-h72rn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.411763 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b39663f2-10f5-47c8-817d-7667d49539a0-utilities" (OuterVolumeSpecName: "utilities") pod "b39663f2-10f5-47c8-817d-7667d49539a0" (UID: "b39663f2-10f5-47c8-817d-7667d49539a0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.412281 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7" (UID: "4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.412364 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b39663f2-10f5-47c8-817d-7667d49539a0-kube-api-access-8rljg" (OuterVolumeSpecName: "kube-api-access-8rljg") pod "b39663f2-10f5-47c8-817d-7667d49539a0" (UID: "b39663f2-10f5-47c8-817d-7667d49539a0"). InnerVolumeSpecName "kube-api-access-8rljg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.412788 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8caeadee-cb78-47a9-b93f-e4a8e270a952-utilities" (OuterVolumeSpecName: "utilities") pod "8caeadee-cb78-47a9-b93f-e4a8e270a952" (UID: "8caeadee-cb78-47a9-b93f-e4a8e270a952"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.413373 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-kube-api-access-tk2sx" (OuterVolumeSpecName: "kube-api-access-tk2sx") pod "4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7" (UID: "4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7"). InnerVolumeSpecName "kube-api-access-tk2sx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.415712 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8caeadee-cb78-47a9-b93f-e4a8e270a952-kube-api-access-lv72s" (OuterVolumeSpecName: "kube-api-access-lv72s") pod "8caeadee-cb78-47a9-b93f-e4a8e270a952" (UID: "8caeadee-cb78-47a9-b93f-e4a8e270a952"). InnerVolumeSpecName "kube-api-access-lv72s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.424952 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eedebded-dcce-4646-837f-26b33ed68cfd-utilities" (OuterVolumeSpecName: "utilities") pod "eedebded-dcce-4646-837f-26b33ed68cfd" (UID: "eedebded-dcce-4646-837f-26b33ed68cfd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.442078 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b39663f2-10f5-47c8-817d-7667d49539a0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b39663f2-10f5-47c8-817d-7667d49539a0" (UID: "b39663f2-10f5-47c8-817d-7667d49539a0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.479244 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eedebded-dcce-4646-837f-26b33ed68cfd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eedebded-dcce-4646-837f-26b33ed68cfd" (UID: "eedebded-dcce-4646-837f-26b33ed68cfd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.508097 4967 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.508132 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lv72s\" (UniqueName: \"kubernetes.io/projected/8caeadee-cb78-47a9-b93f-e4a8e270a952-kube-api-access-lv72s\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.508144 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h72rn\" (UniqueName: \"kubernetes.io/projected/eedebded-dcce-4646-837f-26b33ed68cfd-kube-api-access-h72rn\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.508156 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eedebded-dcce-4646-837f-26b33ed68cfd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.508168 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8caeadee-cb78-47a9-b93f-e4a8e270a952-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.508179 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eedebded-dcce-4646-837f-26b33ed68cfd-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.508188 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk2sx\" (UniqueName: \"kubernetes.io/projected/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-kube-api-access-tk2sx\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.508198 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b39663f2-10f5-47c8-817d-7667d49539a0-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.508208 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b39663f2-10f5-47c8-817d-7667d49539a0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.508218 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rljg\" (UniqueName: \"kubernetes.io/projected/b39663f2-10f5-47c8-817d-7667d49539a0-kube-api-access-8rljg\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.508228 4967 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.509935 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8caeadee-cb78-47a9-b93f-e4a8e270a952-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8caeadee-cb78-47a9-b93f-e4a8e270a952" (UID: "8caeadee-cb78-47a9-b93f-e4a8e270a952"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.608915 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8caeadee-cb78-47a9-b93f-e4a8e270a952-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.642367 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-xntrp"] Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.877249 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-xntrp" event={"ID":"34e31926-b4b0-4c27-b2e9-8825d80a21f9","Type":"ContainerStarted","Data":"99255b8f3c2d850045b4caefa9f39a76d32d6756434ff6e49e985964ebeae5d6"} Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.877357 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-xntrp" event={"ID":"34e31926-b4b0-4c27-b2e9-8825d80a21f9","Type":"ContainerStarted","Data":"3ff5f4fa1b6afccb0e711e2762021ff7aac6fd6ee80e12880dd736f3d05b6a93"} Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.877731 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-xntrp" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.879026 4967 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-xntrp container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.55:8080/healthz\": dial tcp 10.217.0.55:8080: connect: connection refused" start-of-body= Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.879160 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-xntrp" podUID="34e31926-b4b0-4c27-b2e9-8825d80a21f9" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.55:8080/healthz\": dial tcp 10.217.0.55:8080: connect: connection refused" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.879678 4967 generic.go:334] "Generic (PLEG): container finished" podID="dc581da3-1d2d-4d88-a2a8-6729abd4b955" containerID="6dc881fc9c643750b5192534b34103bc5e28440a103c56c2e4d28098e4928df5" exitCode=0 Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.879746 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7kkh6" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.879753 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7kkh6" event={"ID":"dc581da3-1d2d-4d88-a2a8-6729abd4b955","Type":"ContainerDied","Data":"6dc881fc9c643750b5192534b34103bc5e28440a103c56c2e4d28098e4928df5"} Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.879784 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7kkh6" event={"ID":"dc581da3-1d2d-4d88-a2a8-6729abd4b955","Type":"ContainerDied","Data":"7cd4b299605e6dbe28f1a7dbd2f544146d3b595cba0152aae99f48e9fad1c9ed"} Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.879803 4967 scope.go:117] "RemoveContainer" containerID="6dc881fc9c643750b5192534b34103bc5e28440a103c56c2e4d28098e4928df5" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.882037 4967 generic.go:334] "Generic (PLEG): container finished" podID="b39663f2-10f5-47c8-817d-7667d49539a0" containerID="59eb9d4818a2ba08609393163f3189cf2c608c0ff1b1903d145c67b7d904b227" exitCode=0 Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.882064 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-48b7h" event={"ID":"b39663f2-10f5-47c8-817d-7667d49539a0","Type":"ContainerDied","Data":"59eb9d4818a2ba08609393163f3189cf2c608c0ff1b1903d145c67b7d904b227"} Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.882096 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-48b7h" event={"ID":"b39663f2-10f5-47c8-817d-7667d49539a0","Type":"ContainerDied","Data":"0811fef926f86c2d0318f9e5b9db5be3a9b9e1430c32b09017ec398db96a61a6"} Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.882332 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-48b7h" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.886872 4967 generic.go:334] "Generic (PLEG): container finished" podID="4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7" containerID="21d74f30a81d3a5b02d4b2f4279acd97030b4cdc71e86cd7f3717a1601757c53" exitCode=0 Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.887007 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj" event={"ID":"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7","Type":"ContainerDied","Data":"21d74f30a81d3a5b02d4b2f4279acd97030b4cdc71e86cd7f3717a1601757c53"} Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.887045 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj" event={"ID":"4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7","Type":"ContainerDied","Data":"0fc8b031a32707f7449225fd212f5bda1f81f1735172c032002820f472426b9a"} Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.888133 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ql9tj" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.890127 4967 generic.go:334] "Generic (PLEG): container finished" podID="eedebded-dcce-4646-837f-26b33ed68cfd" containerID="7def8e915eb88fbf43a02194283fa6224cb8c298450445eaf79fc8d93b2bfa01" exitCode=0 Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.890208 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w7s76" event={"ID":"eedebded-dcce-4646-837f-26b33ed68cfd","Type":"ContainerDied","Data":"7def8e915eb88fbf43a02194283fa6224cb8c298450445eaf79fc8d93b2bfa01"} Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.890344 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w7s76" event={"ID":"eedebded-dcce-4646-837f-26b33ed68cfd","Type":"ContainerDied","Data":"c9ee5d47253f201edd7899cb6cadbd9e4f7a4c883916818bbc124b78e4284bfb"} Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.890510 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-w7s76" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.893747 4967 generic.go:334] "Generic (PLEG): container finished" podID="8caeadee-cb78-47a9-b93f-e4a8e270a952" containerID="bb75e2cb0ec4d4b1540e63f62360d225e73aa38da442b7d496a406ffb577a2c4" exitCode=0 Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.893810 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rr6kq" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.893831 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rr6kq" event={"ID":"8caeadee-cb78-47a9-b93f-e4a8e270a952","Type":"ContainerDied","Data":"bb75e2cb0ec4d4b1540e63f62360d225e73aa38da442b7d496a406ffb577a2c4"} Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.894055 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rr6kq" event={"ID":"8caeadee-cb78-47a9-b93f-e4a8e270a952","Type":"ContainerDied","Data":"806f00f7ba86612cdf704acb8983af70e3b5cc900fff2588c81d9c575c08f8a7"} Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.897718 4967 scope.go:117] "RemoveContainer" containerID="7cd2ce61556b1f11ea0d58a8f8461a134e10b8b2e0041dabe7f474c35a60b343" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.906912 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-xntrp" podStartSLOduration=1.906885869 podStartE2EDuration="1.906885869s" podCreationTimestamp="2025-11-21 15:39:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:39:46.906860068 +0000 UTC m=+275.165381086" watchObservedRunningTime="2025-11-21 15:39:46.906885869 +0000 UTC m=+275.165406877" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.922121 4967 scope.go:117] "RemoveContainer" containerID="94490ca1ff936d40d37e4dfbf9fcdb4d254dce23fc2a2c1597c684cdbba902dd" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.923117 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7kkh6"] Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.926356 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openshift-marketplace/certified-operators-7kkh6"] Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.936291 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-48b7h"] Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.949775 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-48b7h"] Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.954332 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rr6kq"] Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.960039 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rr6kq"] Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.968034 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ql9tj"] Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.972647 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ql9tj"] Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.975112 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-w7s76"] Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.975583 4967 scope.go:117] "RemoveContainer" containerID="6dc881fc9c643750b5192534b34103bc5e28440a103c56c2e4d28098e4928df5" Nov 21 15:39:46 crc kubenswrapper[4967]: E1121 15:39:46.976698 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6dc881fc9c643750b5192534b34103bc5e28440a103c56c2e4d28098e4928df5\": container with ID starting with 6dc881fc9c643750b5192534b34103bc5e28440a103c56c2e4d28098e4928df5 not found: ID does not exist" containerID="6dc881fc9c643750b5192534b34103bc5e28440a103c56c2e4d28098e4928df5" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.976748 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6dc881fc9c643750b5192534b34103bc5e28440a103c56c2e4d28098e4928df5"} err="failed to get container status \"6dc881fc9c643750b5192534b34103bc5e28440a103c56c2e4d28098e4928df5\": rpc error: code = NotFound desc = could not find container \"6dc881fc9c643750b5192534b34103bc5e28440a103c56c2e4d28098e4928df5\": container with ID starting with 6dc881fc9c643750b5192534b34103bc5e28440a103c56c2e4d28098e4928df5 not found: ID does not exist" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.976826 4967 scope.go:117] "RemoveContainer" containerID="7cd2ce61556b1f11ea0d58a8f8461a134e10b8b2e0041dabe7f474c35a60b343" Nov 21 15:39:46 crc kubenswrapper[4967]: E1121 15:39:46.977081 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7cd2ce61556b1f11ea0d58a8f8461a134e10b8b2e0041dabe7f474c35a60b343\": container with ID starting with 7cd2ce61556b1f11ea0d58a8f8461a134e10b8b2e0041dabe7f474c35a60b343 not found: ID does not exist" containerID="7cd2ce61556b1f11ea0d58a8f8461a134e10b8b2e0041dabe7f474c35a60b343" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.977111 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7cd2ce61556b1f11ea0d58a8f8461a134e10b8b2e0041dabe7f474c35a60b343"} err="failed to get container status \"7cd2ce61556b1f11ea0d58a8f8461a134e10b8b2e0041dabe7f474c35a60b343\": rpc error: code = NotFound desc = could not find 
container \"7cd2ce61556b1f11ea0d58a8f8461a134e10b8b2e0041dabe7f474c35a60b343\": container with ID starting with 7cd2ce61556b1f11ea0d58a8f8461a134e10b8b2e0041dabe7f474c35a60b343 not found: ID does not exist" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.977131 4967 scope.go:117] "RemoveContainer" containerID="94490ca1ff936d40d37e4dfbf9fcdb4d254dce23fc2a2c1597c684cdbba902dd" Nov 21 15:39:46 crc kubenswrapper[4967]: E1121 15:39:46.977585 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94490ca1ff936d40d37e4dfbf9fcdb4d254dce23fc2a2c1597c684cdbba902dd\": container with ID starting with 94490ca1ff936d40d37e4dfbf9fcdb4d254dce23fc2a2c1597c684cdbba902dd not found: ID does not exist" containerID="94490ca1ff936d40d37e4dfbf9fcdb4d254dce23fc2a2c1597c684cdbba902dd" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.977618 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94490ca1ff936d40d37e4dfbf9fcdb4d254dce23fc2a2c1597c684cdbba902dd"} err="failed to get container status \"94490ca1ff936d40d37e4dfbf9fcdb4d254dce23fc2a2c1597c684cdbba902dd\": rpc error: code = NotFound desc = could not find container \"94490ca1ff936d40d37e4dfbf9fcdb4d254dce23fc2a2c1597c684cdbba902dd\": container with ID starting with 94490ca1ff936d40d37e4dfbf9fcdb4d254dce23fc2a2c1597c684cdbba902dd not found: ID does not exist" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.977640 4967 scope.go:117] "RemoveContainer" containerID="59eb9d4818a2ba08609393163f3189cf2c608c0ff1b1903d145c67b7d904b227" Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.978001 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-w7s76"] Nov 21 15:39:46 crc kubenswrapper[4967]: I1121 15:39:46.990514 4967 scope.go:117] "RemoveContainer" containerID="40a81b97b0024dd4ee856d7423e5a35f0bf7574fb530f4c39e8f58ea9a781079" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.004033 4967 scope.go:117] "RemoveContainer" containerID="c441a63906fe2df6e3f9524130e05ce9935b85f240c3176191c370ef496e7cde" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.025659 4967 scope.go:117] "RemoveContainer" containerID="59eb9d4818a2ba08609393163f3189cf2c608c0ff1b1903d145c67b7d904b227" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.027049 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59eb9d4818a2ba08609393163f3189cf2c608c0ff1b1903d145c67b7d904b227\": container with ID starting with 59eb9d4818a2ba08609393163f3189cf2c608c0ff1b1903d145c67b7d904b227 not found: ID does not exist" containerID="59eb9d4818a2ba08609393163f3189cf2c608c0ff1b1903d145c67b7d904b227" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.027097 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59eb9d4818a2ba08609393163f3189cf2c608c0ff1b1903d145c67b7d904b227"} err="failed to get container status \"59eb9d4818a2ba08609393163f3189cf2c608c0ff1b1903d145c67b7d904b227\": rpc error: code = NotFound desc = could not find container \"59eb9d4818a2ba08609393163f3189cf2c608c0ff1b1903d145c67b7d904b227\": container with ID starting with 59eb9d4818a2ba08609393163f3189cf2c608c0ff1b1903d145c67b7d904b227 not found: ID does not exist" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.027130 4967 scope.go:117] "RemoveContainer" 
containerID="40a81b97b0024dd4ee856d7423e5a35f0bf7574fb530f4c39e8f58ea9a781079" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.027541 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40a81b97b0024dd4ee856d7423e5a35f0bf7574fb530f4c39e8f58ea9a781079\": container with ID starting with 40a81b97b0024dd4ee856d7423e5a35f0bf7574fb530f4c39e8f58ea9a781079 not found: ID does not exist" containerID="40a81b97b0024dd4ee856d7423e5a35f0bf7574fb530f4c39e8f58ea9a781079" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.027579 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40a81b97b0024dd4ee856d7423e5a35f0bf7574fb530f4c39e8f58ea9a781079"} err="failed to get container status \"40a81b97b0024dd4ee856d7423e5a35f0bf7574fb530f4c39e8f58ea9a781079\": rpc error: code = NotFound desc = could not find container \"40a81b97b0024dd4ee856d7423e5a35f0bf7574fb530f4c39e8f58ea9a781079\": container with ID starting with 40a81b97b0024dd4ee856d7423e5a35f0bf7574fb530f4c39e8f58ea9a781079 not found: ID does not exist" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.027611 4967 scope.go:117] "RemoveContainer" containerID="c441a63906fe2df6e3f9524130e05ce9935b85f240c3176191c370ef496e7cde" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.028787 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c441a63906fe2df6e3f9524130e05ce9935b85f240c3176191c370ef496e7cde\": container with ID starting with c441a63906fe2df6e3f9524130e05ce9935b85f240c3176191c370ef496e7cde not found: ID does not exist" containerID="c441a63906fe2df6e3f9524130e05ce9935b85f240c3176191c370ef496e7cde" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.028811 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c441a63906fe2df6e3f9524130e05ce9935b85f240c3176191c370ef496e7cde"} err="failed to get container status \"c441a63906fe2df6e3f9524130e05ce9935b85f240c3176191c370ef496e7cde\": rpc error: code = NotFound desc = could not find container \"c441a63906fe2df6e3f9524130e05ce9935b85f240c3176191c370ef496e7cde\": container with ID starting with c441a63906fe2df6e3f9524130e05ce9935b85f240c3176191c370ef496e7cde not found: ID does not exist" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.028828 4967 scope.go:117] "RemoveContainer" containerID="21d74f30a81d3a5b02d4b2f4279acd97030b4cdc71e86cd7f3717a1601757c53" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.042546 4967 scope.go:117] "RemoveContainer" containerID="21d74f30a81d3a5b02d4b2f4279acd97030b4cdc71e86cd7f3717a1601757c53" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.043284 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21d74f30a81d3a5b02d4b2f4279acd97030b4cdc71e86cd7f3717a1601757c53\": container with ID starting with 21d74f30a81d3a5b02d4b2f4279acd97030b4cdc71e86cd7f3717a1601757c53 not found: ID does not exist" containerID="21d74f30a81d3a5b02d4b2f4279acd97030b4cdc71e86cd7f3717a1601757c53" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.043349 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21d74f30a81d3a5b02d4b2f4279acd97030b4cdc71e86cd7f3717a1601757c53"} err="failed to get container status \"21d74f30a81d3a5b02d4b2f4279acd97030b4cdc71e86cd7f3717a1601757c53\": rpc error: code = 
NotFound desc = could not find container \"21d74f30a81d3a5b02d4b2f4279acd97030b4cdc71e86cd7f3717a1601757c53\": container with ID starting with 21d74f30a81d3a5b02d4b2f4279acd97030b4cdc71e86cd7f3717a1601757c53 not found: ID does not exist" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.043383 4967 scope.go:117] "RemoveContainer" containerID="7def8e915eb88fbf43a02194283fa6224cb8c298450445eaf79fc8d93b2bfa01" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.059519 4967 scope.go:117] "RemoveContainer" containerID="8d302365c1c4dcddd284281d5b42fc5ba3590fdb93b322ae2ba62a299fcf3962" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.078035 4967 scope.go:117] "RemoveContainer" containerID="24e14ca992b7668b2dc17d1cf67d0f58edde4f9432133e4478b9d1ea6850afe2" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.104014 4967 scope.go:117] "RemoveContainer" containerID="7def8e915eb88fbf43a02194283fa6224cb8c298450445eaf79fc8d93b2bfa01" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.104607 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7def8e915eb88fbf43a02194283fa6224cb8c298450445eaf79fc8d93b2bfa01\": container with ID starting with 7def8e915eb88fbf43a02194283fa6224cb8c298450445eaf79fc8d93b2bfa01 not found: ID does not exist" containerID="7def8e915eb88fbf43a02194283fa6224cb8c298450445eaf79fc8d93b2bfa01" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.104640 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7def8e915eb88fbf43a02194283fa6224cb8c298450445eaf79fc8d93b2bfa01"} err="failed to get container status \"7def8e915eb88fbf43a02194283fa6224cb8c298450445eaf79fc8d93b2bfa01\": rpc error: code = NotFound desc = could not find container \"7def8e915eb88fbf43a02194283fa6224cb8c298450445eaf79fc8d93b2bfa01\": container with ID starting with 7def8e915eb88fbf43a02194283fa6224cb8c298450445eaf79fc8d93b2bfa01 not found: ID does not exist" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.104708 4967 scope.go:117] "RemoveContainer" containerID="8d302365c1c4dcddd284281d5b42fc5ba3590fdb93b322ae2ba62a299fcf3962" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.105605 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d302365c1c4dcddd284281d5b42fc5ba3590fdb93b322ae2ba62a299fcf3962\": container with ID starting with 8d302365c1c4dcddd284281d5b42fc5ba3590fdb93b322ae2ba62a299fcf3962 not found: ID does not exist" containerID="8d302365c1c4dcddd284281d5b42fc5ba3590fdb93b322ae2ba62a299fcf3962" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.105640 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d302365c1c4dcddd284281d5b42fc5ba3590fdb93b322ae2ba62a299fcf3962"} err="failed to get container status \"8d302365c1c4dcddd284281d5b42fc5ba3590fdb93b322ae2ba62a299fcf3962\": rpc error: code = NotFound desc = could not find container \"8d302365c1c4dcddd284281d5b42fc5ba3590fdb93b322ae2ba62a299fcf3962\": container with ID starting with 8d302365c1c4dcddd284281d5b42fc5ba3590fdb93b322ae2ba62a299fcf3962 not found: ID does not exist" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.105661 4967 scope.go:117] "RemoveContainer" containerID="24e14ca992b7668b2dc17d1cf67d0f58edde4f9432133e4478b9d1ea6850afe2" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.105891 4967 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"24e14ca992b7668b2dc17d1cf67d0f58edde4f9432133e4478b9d1ea6850afe2\": container with ID starting with 24e14ca992b7668b2dc17d1cf67d0f58edde4f9432133e4478b9d1ea6850afe2 not found: ID does not exist" containerID="24e14ca992b7668b2dc17d1cf67d0f58edde4f9432133e4478b9d1ea6850afe2" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.105913 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24e14ca992b7668b2dc17d1cf67d0f58edde4f9432133e4478b9d1ea6850afe2"} err="failed to get container status \"24e14ca992b7668b2dc17d1cf67d0f58edde4f9432133e4478b9d1ea6850afe2\": rpc error: code = NotFound desc = could not find container \"24e14ca992b7668b2dc17d1cf67d0f58edde4f9432133e4478b9d1ea6850afe2\": container with ID starting with 24e14ca992b7668b2dc17d1cf67d0f58edde4f9432133e4478b9d1ea6850afe2 not found: ID does not exist" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.105927 4967 scope.go:117] "RemoveContainer" containerID="bb75e2cb0ec4d4b1540e63f62360d225e73aa38da442b7d496a406ffb577a2c4" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.121022 4967 scope.go:117] "RemoveContainer" containerID="29283dd15fdffde7fca61226c0b0b890f9b598bf5c548b44a16283c57707dbed" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.135552 4967 scope.go:117] "RemoveContainer" containerID="c6ff110f39c226806933a0319c484c40abd5d24066e24f6e1ced306cb36ead38" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.150168 4967 scope.go:117] "RemoveContainer" containerID="bb75e2cb0ec4d4b1540e63f62360d225e73aa38da442b7d496a406ffb577a2c4" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.150774 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb75e2cb0ec4d4b1540e63f62360d225e73aa38da442b7d496a406ffb577a2c4\": container with ID starting with bb75e2cb0ec4d4b1540e63f62360d225e73aa38da442b7d496a406ffb577a2c4 not found: ID does not exist" containerID="bb75e2cb0ec4d4b1540e63f62360d225e73aa38da442b7d496a406ffb577a2c4" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.150805 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb75e2cb0ec4d4b1540e63f62360d225e73aa38da442b7d496a406ffb577a2c4"} err="failed to get container status \"bb75e2cb0ec4d4b1540e63f62360d225e73aa38da442b7d496a406ffb577a2c4\": rpc error: code = NotFound desc = could not find container \"bb75e2cb0ec4d4b1540e63f62360d225e73aa38da442b7d496a406ffb577a2c4\": container with ID starting with bb75e2cb0ec4d4b1540e63f62360d225e73aa38da442b7d496a406ffb577a2c4 not found: ID does not exist" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.150844 4967 scope.go:117] "RemoveContainer" containerID="29283dd15fdffde7fca61226c0b0b890f9b598bf5c548b44a16283c57707dbed" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.151238 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29283dd15fdffde7fca61226c0b0b890f9b598bf5c548b44a16283c57707dbed\": container with ID starting with 29283dd15fdffde7fca61226c0b0b890f9b598bf5c548b44a16283c57707dbed not found: ID does not exist" containerID="29283dd15fdffde7fca61226c0b0b890f9b598bf5c548b44a16283c57707dbed" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.151286 4967 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"29283dd15fdffde7fca61226c0b0b890f9b598bf5c548b44a16283c57707dbed"} err="failed to get container status \"29283dd15fdffde7fca61226c0b0b890f9b598bf5c548b44a16283c57707dbed\": rpc error: code = NotFound desc = could not find container \"29283dd15fdffde7fca61226c0b0b890f9b598bf5c548b44a16283c57707dbed\": container with ID starting with 29283dd15fdffde7fca61226c0b0b890f9b598bf5c548b44a16283c57707dbed not found: ID does not exist" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.151355 4967 scope.go:117] "RemoveContainer" containerID="c6ff110f39c226806933a0319c484c40abd5d24066e24f6e1ced306cb36ead38" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.151753 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6ff110f39c226806933a0319c484c40abd5d24066e24f6e1ced306cb36ead38\": container with ID starting with c6ff110f39c226806933a0319c484c40abd5d24066e24f6e1ced306cb36ead38 not found: ID does not exist" containerID="c6ff110f39c226806933a0319c484c40abd5d24066e24f6e1ced306cb36ead38" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.151785 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6ff110f39c226806933a0319c484c40abd5d24066e24f6e1ced306cb36ead38"} err="failed to get container status \"c6ff110f39c226806933a0319c484c40abd5d24066e24f6e1ced306cb36ead38\": rpc error: code = NotFound desc = could not find container \"c6ff110f39c226806933a0319c484c40abd5d24066e24f6e1ced306cb36ead38\": container with ID starting with c6ff110f39c226806933a0319c484c40abd5d24066e24f6e1ced306cb36ead38 not found: ID does not exist" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.909674 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-xntrp" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.988415 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-q2vpn"] Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.988697 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8caeadee-cb78-47a9-b93f-e4a8e270a952" containerName="extract-content" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.988716 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="8caeadee-cb78-47a9-b93f-e4a8e270a952" containerName="extract-content" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.988729 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eedebded-dcce-4646-837f-26b33ed68cfd" containerName="registry-server" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.988735 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eedebded-dcce-4646-837f-26b33ed68cfd" containerName="registry-server" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.988743 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8caeadee-cb78-47a9-b93f-e4a8e270a952" containerName="extract-utilities" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.988750 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="8caeadee-cb78-47a9-b93f-e4a8e270a952" containerName="extract-utilities" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.988756 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b39663f2-10f5-47c8-817d-7667d49539a0" containerName="registry-server" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.988763 4967 
state_mem.go:107] "Deleted CPUSet assignment" podUID="b39663f2-10f5-47c8-817d-7667d49539a0" containerName="registry-server" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.988776 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eedebded-dcce-4646-837f-26b33ed68cfd" containerName="extract-content" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.988782 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eedebded-dcce-4646-837f-26b33ed68cfd" containerName="extract-content" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.988794 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc581da3-1d2d-4d88-a2a8-6729abd4b955" containerName="registry-server" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.988800 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc581da3-1d2d-4d88-a2a8-6729abd4b955" containerName="registry-server" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.988805 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8caeadee-cb78-47a9-b93f-e4a8e270a952" containerName="registry-server" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.988811 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="8caeadee-cb78-47a9-b93f-e4a8e270a952" containerName="registry-server" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.988819 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc581da3-1d2d-4d88-a2a8-6729abd4b955" containerName="extract-utilities" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.988825 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc581da3-1d2d-4d88-a2a8-6729abd4b955" containerName="extract-utilities" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.988836 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc581da3-1d2d-4d88-a2a8-6729abd4b955" containerName="extract-content" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.988843 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc581da3-1d2d-4d88-a2a8-6729abd4b955" containerName="extract-content" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.988850 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b39663f2-10f5-47c8-817d-7667d49539a0" containerName="extract-content" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.988856 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b39663f2-10f5-47c8-817d-7667d49539a0" containerName="extract-content" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.988863 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b39663f2-10f5-47c8-817d-7667d49539a0" containerName="extract-utilities" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.988869 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b39663f2-10f5-47c8-817d-7667d49539a0" containerName="extract-utilities" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.988878 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7" containerName="marketplace-operator" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.988884 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7" containerName="marketplace-operator" Nov 21 15:39:47 crc kubenswrapper[4967]: E1121 15:39:47.988896 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eedebded-dcce-4646-837f-26b33ed68cfd" containerName="extract-utilities" Nov 21 15:39:47 crc 
kubenswrapper[4967]: I1121 15:39:47.988902 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eedebded-dcce-4646-837f-26b33ed68cfd" containerName="extract-utilities" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.989000 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc581da3-1d2d-4d88-a2a8-6729abd4b955" containerName="registry-server" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.989011 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7" containerName="marketplace-operator" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.989022 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="eedebded-dcce-4646-837f-26b33ed68cfd" containerName="registry-server" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.989029 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="b39663f2-10f5-47c8-817d-7667d49539a0" containerName="registry-server" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.989038 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="8caeadee-cb78-47a9-b93f-e4a8e270a952" containerName="registry-server" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.989870 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q2vpn" Nov 21 15:39:47 crc kubenswrapper[4967]: I1121 15:39:47.992468 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.006446 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q2vpn"] Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.030884 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e04b35e4-8722-44c7-8c0a-356e143f637e-catalog-content\") pod \"certified-operators-q2vpn\" (UID: \"e04b35e4-8722-44c7-8c0a-356e143f637e\") " pod="openshift-marketplace/certified-operators-q2vpn" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.030953 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9czz6\" (UniqueName: \"kubernetes.io/projected/e04b35e4-8722-44c7-8c0a-356e143f637e-kube-api-access-9czz6\") pod \"certified-operators-q2vpn\" (UID: \"e04b35e4-8722-44c7-8c0a-356e143f637e\") " pod="openshift-marketplace/certified-operators-q2vpn" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.031025 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e04b35e4-8722-44c7-8c0a-356e143f637e-utilities\") pod \"certified-operators-q2vpn\" (UID: \"e04b35e4-8722-44c7-8c0a-356e143f637e\") " pod="openshift-marketplace/certified-operators-q2vpn" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.132027 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9czz6\" (UniqueName: \"kubernetes.io/projected/e04b35e4-8722-44c7-8c0a-356e143f637e-kube-api-access-9czz6\") pod \"certified-operators-q2vpn\" (UID: \"e04b35e4-8722-44c7-8c0a-356e143f637e\") " pod="openshift-marketplace/certified-operators-q2vpn" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.132118 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e04b35e4-8722-44c7-8c0a-356e143f637e-utilities\") pod \"certified-operators-q2vpn\" (UID: \"e04b35e4-8722-44c7-8c0a-356e143f637e\") " pod="openshift-marketplace/certified-operators-q2vpn" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.132165 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e04b35e4-8722-44c7-8c0a-356e143f637e-catalog-content\") pod \"certified-operators-q2vpn\" (UID: \"e04b35e4-8722-44c7-8c0a-356e143f637e\") " pod="openshift-marketplace/certified-operators-q2vpn" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.132732 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e04b35e4-8722-44c7-8c0a-356e143f637e-catalog-content\") pod \"certified-operators-q2vpn\" (UID: \"e04b35e4-8722-44c7-8c0a-356e143f637e\") " pod="openshift-marketplace/certified-operators-q2vpn" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.132783 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e04b35e4-8722-44c7-8c0a-356e143f637e-utilities\") pod \"certified-operators-q2vpn\" (UID: \"e04b35e4-8722-44c7-8c0a-356e143f637e\") " pod="openshift-marketplace/certified-operators-q2vpn" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.151230 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9czz6\" (UniqueName: \"kubernetes.io/projected/e04b35e4-8722-44c7-8c0a-356e143f637e-kube-api-access-9czz6\") pod \"certified-operators-q2vpn\" (UID: \"e04b35e4-8722-44c7-8c0a-356e143f637e\") " pod="openshift-marketplace/certified-operators-q2vpn" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.184743 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2dhwv"] Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.186188 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2dhwv" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.189022 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.201606 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2dhwv"] Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.232930 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84bzn\" (UniqueName: \"kubernetes.io/projected/58b7bcbc-c1ff-48a4-8d78-eded8239d6a4-kube-api-access-84bzn\") pod \"community-operators-2dhwv\" (UID: \"58b7bcbc-c1ff-48a4-8d78-eded8239d6a4\") " pod="openshift-marketplace/community-operators-2dhwv" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.233005 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58b7bcbc-c1ff-48a4-8d78-eded8239d6a4-utilities\") pod \"community-operators-2dhwv\" (UID: \"58b7bcbc-c1ff-48a4-8d78-eded8239d6a4\") " pod="openshift-marketplace/community-operators-2dhwv" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.233054 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58b7bcbc-c1ff-48a4-8d78-eded8239d6a4-catalog-content\") pod \"community-operators-2dhwv\" (UID: \"58b7bcbc-c1ff-48a4-8d78-eded8239d6a4\") " pod="openshift-marketplace/community-operators-2dhwv" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.303749 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-q2vpn" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.335282 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84bzn\" (UniqueName: \"kubernetes.io/projected/58b7bcbc-c1ff-48a4-8d78-eded8239d6a4-kube-api-access-84bzn\") pod \"community-operators-2dhwv\" (UID: \"58b7bcbc-c1ff-48a4-8d78-eded8239d6a4\") " pod="openshift-marketplace/community-operators-2dhwv" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.335367 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58b7bcbc-c1ff-48a4-8d78-eded8239d6a4-utilities\") pod \"community-operators-2dhwv\" (UID: \"58b7bcbc-c1ff-48a4-8d78-eded8239d6a4\") " pod="openshift-marketplace/community-operators-2dhwv" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.335416 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58b7bcbc-c1ff-48a4-8d78-eded8239d6a4-catalog-content\") pod \"community-operators-2dhwv\" (UID: \"58b7bcbc-c1ff-48a4-8d78-eded8239d6a4\") " pod="openshift-marketplace/community-operators-2dhwv" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.336251 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58b7bcbc-c1ff-48a4-8d78-eded8239d6a4-catalog-content\") pod \"community-operators-2dhwv\" (UID: \"58b7bcbc-c1ff-48a4-8d78-eded8239d6a4\") " pod="openshift-marketplace/community-operators-2dhwv" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.336358 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58b7bcbc-c1ff-48a4-8d78-eded8239d6a4-utilities\") pod \"community-operators-2dhwv\" (UID: \"58b7bcbc-c1ff-48a4-8d78-eded8239d6a4\") " pod="openshift-marketplace/community-operators-2dhwv" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.358541 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84bzn\" (UniqueName: \"kubernetes.io/projected/58b7bcbc-c1ff-48a4-8d78-eded8239d6a4-kube-api-access-84bzn\") pod \"community-operators-2dhwv\" (UID: \"58b7bcbc-c1ff-48a4-8d78-eded8239d6a4\") " pod="openshift-marketplace/community-operators-2dhwv" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.523301 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2dhwv" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.543714 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7" path="/var/lib/kubelet/pods/4d14ef2b-0a55-4c5c-8f92-9ad0662b0cb7/volumes" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.544276 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8caeadee-cb78-47a9-b93f-e4a8e270a952" path="/var/lib/kubelet/pods/8caeadee-cb78-47a9-b93f-e4a8e270a952/volumes" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.545157 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b39663f2-10f5-47c8-817d-7667d49539a0" path="/var/lib/kubelet/pods/b39663f2-10f5-47c8-817d-7667d49539a0/volumes" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.546326 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc581da3-1d2d-4d88-a2a8-6729abd4b955" path="/var/lib/kubelet/pods/dc581da3-1d2d-4d88-a2a8-6729abd4b955/volumes" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.546941 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eedebded-dcce-4646-837f-26b33ed68cfd" path="/var/lib/kubelet/pods/eedebded-dcce-4646-837f-26b33ed68cfd/volumes" Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.729580 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q2vpn"] Nov 21 15:39:48 crc kubenswrapper[4967]: W1121 15:39:48.731274 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode04b35e4_8722_44c7_8c0a_356e143f637e.slice/crio-1c5d932bc2dcf0a34f7b13c7b8f7ddde51383d6aeec409d881f50d28a4d676a7 WatchSource:0}: Error finding container 1c5d932bc2dcf0a34f7b13c7b8f7ddde51383d6aeec409d881f50d28a4d676a7: Status 404 returned error can't find the container with id 1c5d932bc2dcf0a34f7b13c7b8f7ddde51383d6aeec409d881f50d28a4d676a7 Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.911809 4967 generic.go:334] "Generic (PLEG): container finished" podID="e04b35e4-8722-44c7-8c0a-356e143f637e" containerID="fde58bfbfe4edd32cf1480110e46972678ef139f08ac260eda9fb4520191515c" exitCode=0 Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.911909 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q2vpn" event={"ID":"e04b35e4-8722-44c7-8c0a-356e143f637e","Type":"ContainerDied","Data":"fde58bfbfe4edd32cf1480110e46972678ef139f08ac260eda9fb4520191515c"} Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.911951 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q2vpn" event={"ID":"e04b35e4-8722-44c7-8c0a-356e143f637e","Type":"ContainerStarted","Data":"1c5d932bc2dcf0a34f7b13c7b8f7ddde51383d6aeec409d881f50d28a4d676a7"} Nov 21 15:39:48 crc kubenswrapper[4967]: I1121 15:39:48.924096 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2dhwv"] Nov 21 15:39:48 crc kubenswrapper[4967]: W1121 15:39:48.939512 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod58b7bcbc_c1ff_48a4_8d78_eded8239d6a4.slice/crio-0f7eef12722c780aca6637a4c74ee0469b0b541d7faaccd31bcc147bb861af12 WatchSource:0}: Error finding container 0f7eef12722c780aca6637a4c74ee0469b0b541d7faaccd31bcc147bb861af12: Status 404 returned 
error can't find the container with id 0f7eef12722c780aca6637a4c74ee0469b0b541d7faaccd31bcc147bb861af12 Nov 21 15:39:49 crc kubenswrapper[4967]: I1121 15:39:49.919109 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q2vpn" event={"ID":"e04b35e4-8722-44c7-8c0a-356e143f637e","Type":"ContainerStarted","Data":"428238f23894f7cb09217ff1d1905406fab5f3b02aba72e1a97845fb690ef36b"} Nov 21 15:39:49 crc kubenswrapper[4967]: I1121 15:39:49.926269 4967 generic.go:334] "Generic (PLEG): container finished" podID="58b7bcbc-c1ff-48a4-8d78-eded8239d6a4" containerID="ec3ea7cd5f266b420d4717ce704e3315f42cdf1b8df757667d23d9d0112fbeba" exitCode=0 Nov 21 15:39:49 crc kubenswrapper[4967]: I1121 15:39:49.926432 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2dhwv" event={"ID":"58b7bcbc-c1ff-48a4-8d78-eded8239d6a4","Type":"ContainerDied","Data":"ec3ea7cd5f266b420d4717ce704e3315f42cdf1b8df757667d23d9d0112fbeba"} Nov 21 15:39:49 crc kubenswrapper[4967]: I1121 15:39:49.926488 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2dhwv" event={"ID":"58b7bcbc-c1ff-48a4-8d78-eded8239d6a4","Type":"ContainerStarted","Data":"0f7eef12722c780aca6637a4c74ee0469b0b541d7faaccd31bcc147bb861af12"} Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.381973 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-29zhf"] Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.383111 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-29zhf" Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.387512 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.412065 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-29zhf"] Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.463479 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8e33eb6-c76a-4d8b-896e-75ab69247a2a-utilities\") pod \"redhat-marketplace-29zhf\" (UID: \"a8e33eb6-c76a-4d8b-896e-75ab69247a2a\") " pod="openshift-marketplace/redhat-marketplace-29zhf" Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.463530 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxjlc\" (UniqueName: \"kubernetes.io/projected/a8e33eb6-c76a-4d8b-896e-75ab69247a2a-kube-api-access-rxjlc\") pod \"redhat-marketplace-29zhf\" (UID: \"a8e33eb6-c76a-4d8b-896e-75ab69247a2a\") " pod="openshift-marketplace/redhat-marketplace-29zhf" Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.463566 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8e33eb6-c76a-4d8b-896e-75ab69247a2a-catalog-content\") pod \"redhat-marketplace-29zhf\" (UID: \"a8e33eb6-c76a-4d8b-896e-75ab69247a2a\") " pod="openshift-marketplace/redhat-marketplace-29zhf" Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.565665 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8e33eb6-c76a-4d8b-896e-75ab69247a2a-utilities\") pod 
\"redhat-marketplace-29zhf\" (UID: \"a8e33eb6-c76a-4d8b-896e-75ab69247a2a\") " pod="openshift-marketplace/redhat-marketplace-29zhf" Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.565743 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxjlc\" (UniqueName: \"kubernetes.io/projected/a8e33eb6-c76a-4d8b-896e-75ab69247a2a-kube-api-access-rxjlc\") pod \"redhat-marketplace-29zhf\" (UID: \"a8e33eb6-c76a-4d8b-896e-75ab69247a2a\") " pod="openshift-marketplace/redhat-marketplace-29zhf" Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.565827 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8e33eb6-c76a-4d8b-896e-75ab69247a2a-catalog-content\") pod \"redhat-marketplace-29zhf\" (UID: \"a8e33eb6-c76a-4d8b-896e-75ab69247a2a\") " pod="openshift-marketplace/redhat-marketplace-29zhf" Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.566355 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a8e33eb6-c76a-4d8b-896e-75ab69247a2a-utilities\") pod \"redhat-marketplace-29zhf\" (UID: \"a8e33eb6-c76a-4d8b-896e-75ab69247a2a\") " pod="openshift-marketplace/redhat-marketplace-29zhf" Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.566900 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a8e33eb6-c76a-4d8b-896e-75ab69247a2a-catalog-content\") pod \"redhat-marketplace-29zhf\" (UID: \"a8e33eb6-c76a-4d8b-896e-75ab69247a2a\") " pod="openshift-marketplace/redhat-marketplace-29zhf" Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.589464 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxjlc\" (UniqueName: \"kubernetes.io/projected/a8e33eb6-c76a-4d8b-896e-75ab69247a2a-kube-api-access-rxjlc\") pod \"redhat-marketplace-29zhf\" (UID: \"a8e33eb6-c76a-4d8b-896e-75ab69247a2a\") " pod="openshift-marketplace/redhat-marketplace-29zhf" Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.590535 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tnkcg"] Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.592806 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tnkcg" Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.599771 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.609262 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tnkcg"] Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.667100 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25-utilities\") pod \"redhat-operators-tnkcg\" (UID: \"0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25\") " pod="openshift-marketplace/redhat-operators-tnkcg" Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.667176 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25-catalog-content\") pod \"redhat-operators-tnkcg\" (UID: \"0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25\") " pod="openshift-marketplace/redhat-operators-tnkcg" Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.667244 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssj82\" (UniqueName: \"kubernetes.io/projected/0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25-kube-api-access-ssj82\") pod \"redhat-operators-tnkcg\" (UID: \"0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25\") " pod="openshift-marketplace/redhat-operators-tnkcg" Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.723756 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-29zhf"
Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.768800 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25-catalog-content\") pod \"redhat-operators-tnkcg\" (UID: \"0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25\") " pod="openshift-marketplace/redhat-operators-tnkcg"
Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.768860 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssj82\" (UniqueName: \"kubernetes.io/projected/0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25-kube-api-access-ssj82\") pod \"redhat-operators-tnkcg\" (UID: \"0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25\") " pod="openshift-marketplace/redhat-operators-tnkcg"
Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.768919 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25-utilities\") pod \"redhat-operators-tnkcg\" (UID: \"0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25\") " pod="openshift-marketplace/redhat-operators-tnkcg"
Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.769406 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25-catalog-content\") pod \"redhat-operators-tnkcg\" (UID: \"0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25\") " pod="openshift-marketplace/redhat-operators-tnkcg"
Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.769699 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25-utilities\") pod \"redhat-operators-tnkcg\" (UID: \"0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25\") " pod="openshift-marketplace/redhat-operators-tnkcg"
Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.788730 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssj82\" (UniqueName: \"kubernetes.io/projected/0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25-kube-api-access-ssj82\") pod \"redhat-operators-tnkcg\" (UID: \"0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25\") " pod="openshift-marketplace/redhat-operators-tnkcg"
Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.935057 4967 generic.go:334] "Generic (PLEG): container finished" podID="e04b35e4-8722-44c7-8c0a-356e143f637e" containerID="428238f23894f7cb09217ff1d1905406fab5f3b02aba72e1a97845fb690ef36b" exitCode=0
Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.935417 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q2vpn" event={"ID":"e04b35e4-8722-44c7-8c0a-356e143f637e","Type":"ContainerDied","Data":"428238f23894f7cb09217ff1d1905406fab5f3b02aba72e1a97845fb690ef36b"}
Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.941417 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2dhwv" event={"ID":"58b7bcbc-c1ff-48a4-8d78-eded8239d6a4","Type":"ContainerStarted","Data":"f7c553d0ac7f7749e0c8455785f2c4dfda85b54abb195e17f1559bc1c48f440a"}
Nov 21 15:39:50 crc kubenswrapper[4967]: I1121 15:39:50.985291 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tnkcg"
Nov 21 15:39:51 crc kubenswrapper[4967]: I1121 15:39:51.177540 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-29zhf"]
Nov 21 15:39:51 crc kubenswrapper[4967]: W1121 15:39:51.186598 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda8e33eb6_c76a_4d8b_896e_75ab69247a2a.slice/crio-b60f39440952b50cba5ef15728b26487f6a668c15520e2f5fb488653014b6a05 WatchSource:0}: Error finding container b60f39440952b50cba5ef15728b26487f6a668c15520e2f5fb488653014b6a05: Status 404 returned error can't find the container with id b60f39440952b50cba5ef15728b26487f6a668c15520e2f5fb488653014b6a05
Nov 21 15:39:51 crc kubenswrapper[4967]: I1121 15:39:51.188870 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tnkcg"]
Nov 21 15:39:51 crc kubenswrapper[4967]: W1121 15:39:51.196803 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e20dcd4_a457_43b1_a0a0_5cb1ee78cf25.slice/crio-258abb2c3374b070e99eb1e000e7a7ed71c00170360eff5f5e4b6637972ae48d WatchSource:0}: Error finding container 258abb2c3374b070e99eb1e000e7a7ed71c00170360eff5f5e4b6637972ae48d: Status 404 returned error can't find the container with id 258abb2c3374b070e99eb1e000e7a7ed71c00170360eff5f5e4b6637972ae48d
Nov 21 15:39:51 crc kubenswrapper[4967]: I1121 15:39:51.947779 4967 generic.go:334] "Generic (PLEG): container finished" podID="58b7bcbc-c1ff-48a4-8d78-eded8239d6a4" containerID="f7c553d0ac7f7749e0c8455785f2c4dfda85b54abb195e17f1559bc1c48f440a" exitCode=0
Nov 21 15:39:51 crc kubenswrapper[4967]: I1121 15:39:51.947853 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2dhwv" event={"ID":"58b7bcbc-c1ff-48a4-8d78-eded8239d6a4","Type":"ContainerDied","Data":"f7c553d0ac7f7749e0c8455785f2c4dfda85b54abb195e17f1559bc1c48f440a"}
Nov 21 15:39:51 crc kubenswrapper[4967]: I1121 15:39:51.950073 4967 generic.go:334] "Generic (PLEG): container finished" podID="a8e33eb6-c76a-4d8b-896e-75ab69247a2a" containerID="b5335908e58c3d990be3099dffe256b209d5388a56969f0c83767fbc92b5ffa3" exitCode=0
Nov 21 15:39:51 crc kubenswrapper[4967]: I1121 15:39:51.950530 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-29zhf" event={"ID":"a8e33eb6-c76a-4d8b-896e-75ab69247a2a","Type":"ContainerDied","Data":"b5335908e58c3d990be3099dffe256b209d5388a56969f0c83767fbc92b5ffa3"}
Nov 21 15:39:51 crc kubenswrapper[4967]: I1121 15:39:51.950554 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-29zhf" event={"ID":"a8e33eb6-c76a-4d8b-896e-75ab69247a2a","Type":"ContainerStarted","Data":"b60f39440952b50cba5ef15728b26487f6a668c15520e2f5fb488653014b6a05"}
Nov 21 15:39:51 crc kubenswrapper[4967]: I1121 15:39:51.954135 4967 generic.go:334] "Generic (PLEG): container finished" podID="0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25" containerID="cc7d17b7a4bfcd5ce523ffc3ae518fd538512bc4fbcea347236ff3fb7845f603" exitCode=0
Nov 21 15:39:51 crc kubenswrapper[4967]: I1121 15:39:51.954238 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tnkcg" event={"ID":"0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25","Type":"ContainerDied","Data":"cc7d17b7a4bfcd5ce523ffc3ae518fd538512bc4fbcea347236ff3fb7845f603"}
Nov 21 15:39:51 crc kubenswrapper[4967]: I1121 15:39:51.954291 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tnkcg" event={"ID":"0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25","Type":"ContainerStarted","Data":"258abb2c3374b070e99eb1e000e7a7ed71c00170360eff5f5e4b6637972ae48d"}
Nov 21 15:39:51 crc kubenswrapper[4967]: I1121 15:39:51.959679 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q2vpn" event={"ID":"e04b35e4-8722-44c7-8c0a-356e143f637e","Type":"ContainerStarted","Data":"76c77005e477bf1f2553d4dbe131d0cd8530d668fa405a8e6523e7815121c1e8"}
Nov 21 15:39:52 crc kubenswrapper[4967]: I1121 15:39:52.046708 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-q2vpn" podStartSLOduration=2.608451117 podStartE2EDuration="5.046678482s" podCreationTimestamp="2025-11-21 15:39:47 +0000 UTC" firstStartedPulling="2025-11-21 15:39:48.919682102 +0000 UTC m=+277.178203110" lastFinishedPulling="2025-11-21 15:39:51.357909477 +0000 UTC m=+279.616430475" observedRunningTime="2025-11-21 15:39:52.046153726 +0000 UTC m=+280.304674734" watchObservedRunningTime="2025-11-21 15:39:52.046678482 +0000 UTC m=+280.305199490"
Nov 21 15:39:52 crc kubenswrapper[4967]: I1121 15:39:52.967296 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-29zhf" event={"ID":"a8e33eb6-c76a-4d8b-896e-75ab69247a2a","Type":"ContainerStarted","Data":"37bdd7ed6c368387ba48b8df3c09cfa38a74208068356bc7d2c5521839eed68f"}
Nov 21 15:39:52 crc kubenswrapper[4967]: I1121 15:39:52.969828 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tnkcg" event={"ID":"0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25","Type":"ContainerStarted","Data":"1912d13e91ffacde0939e18d8432a6c23dfbc2551274ccaa9f8458b7907f4987"}
Nov 21 15:39:52 crc kubenswrapper[4967]: I1121 15:39:52.972174 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2dhwv" event={"ID":"58b7bcbc-c1ff-48a4-8d78-eded8239d6a4","Type":"ContainerStarted","Data":"877f6645178671a3ee014e43793fbb5083d903fd5a491337df4dd25fd0329caa"}
Nov 21 15:39:53 crc kubenswrapper[4967]: I1121 15:39:53.033771 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2dhwv" podStartSLOduration=2.578696457 podStartE2EDuration="5.033754101s" podCreationTimestamp="2025-11-21 15:39:48 +0000 UTC" firstStartedPulling="2025-11-21 15:39:49.928040259 +0000 UTC m=+278.186561267" lastFinishedPulling="2025-11-21 15:39:52.383097903 +0000 UTC m=+280.641618911" observedRunningTime="2025-11-21 15:39:53.031207312 +0000 UTC m=+281.289728320" watchObservedRunningTime="2025-11-21 15:39:53.033754101 +0000 UTC m=+281.292275109"
Nov 21 15:39:53 crc kubenswrapper[4967]: I1121 15:39:53.979483 4967 generic.go:334] "Generic (PLEG): container finished" podID="a8e33eb6-c76a-4d8b-896e-75ab69247a2a" containerID="37bdd7ed6c368387ba48b8df3c09cfa38a74208068356bc7d2c5521839eed68f" exitCode=0
Nov 21 15:39:53 crc kubenswrapper[4967]: I1121 15:39:53.979571 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-29zhf" event={"ID":"a8e33eb6-c76a-4d8b-896e-75ab69247a2a","Type":"ContainerDied","Data":"37bdd7ed6c368387ba48b8df3c09cfa38a74208068356bc7d2c5521839eed68f"}
Nov 21 15:39:53 crc kubenswrapper[4967]: I1121 15:39:53.982486 4967 generic.go:334] "Generic (PLEG): container finished" podID="0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25" containerID="1912d13e91ffacde0939e18d8432a6c23dfbc2551274ccaa9f8458b7907f4987" exitCode=0
Nov 21 15:39:53 crc kubenswrapper[4967]: I1121 15:39:53.982565 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tnkcg" event={"ID":"0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25","Type":"ContainerDied","Data":"1912d13e91ffacde0939e18d8432a6c23dfbc2551274ccaa9f8458b7907f4987"}
Nov 21 15:39:54 crc kubenswrapper[4967]: I1121 15:39:54.989843 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-29zhf" event={"ID":"a8e33eb6-c76a-4d8b-896e-75ab69247a2a","Type":"ContainerStarted","Data":"698b26fa676ff5dbf577bb268260c9d84be96b53f4955497bcb624c9105cc15d"}
Nov 21 15:39:55 crc kubenswrapper[4967]: I1121 15:39:55.009881 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-29zhf" podStartSLOduration=2.584935198 podStartE2EDuration="5.009861651s" podCreationTimestamp="2025-11-21 15:39:50 +0000 UTC" firstStartedPulling="2025-11-21 15:39:51.951670617 +0000 UTC m=+280.210191625" lastFinishedPulling="2025-11-21 15:39:54.37659707 +0000 UTC m=+282.635118078" observedRunningTime="2025-11-21 15:39:55.007165118 +0000 UTC m=+283.265686116" watchObservedRunningTime="2025-11-21 15:39:55.009861651 +0000 UTC m=+283.268382659"
Nov 21 15:39:56 crc kubenswrapper[4967]: I1121 15:39:56.006235 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tnkcg" event={"ID":"0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25","Type":"ContainerStarted","Data":"f3fc473d36f232326941c7e7dac4bbde907900a5e2e10364752e57d57cad31bb"}
Nov 21 15:39:56 crc kubenswrapper[4967]: I1121 15:39:56.026334 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tnkcg" podStartSLOduration=2.462358482 podStartE2EDuration="6.026246846s" podCreationTimestamp="2025-11-21 15:39:50 +0000 UTC" firstStartedPulling="2025-11-21 15:39:51.955825136 +0000 UTC m=+280.214346144" lastFinishedPulling="2025-11-21 15:39:55.5197135 +0000 UTC m=+283.778234508" observedRunningTime="2025-11-21 15:39:56.024987787 +0000 UTC m=+284.283508795" watchObservedRunningTime="2025-11-21 15:39:56.026246846 +0000 UTC m=+284.284767854"
Nov 21 15:39:58 crc kubenswrapper[4967]: I1121 15:39:58.304385 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-q2vpn"
Nov 21 15:39:58 crc kubenswrapper[4967]: I1121 15:39:58.304723 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-q2vpn"
Nov 21 15:39:58 crc kubenswrapper[4967]: I1121 15:39:58.344277 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-q2vpn"
Nov 21 15:39:58 crc kubenswrapper[4967]: I1121 15:39:58.524645 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2dhwv"
Nov 21 15:39:58 crc kubenswrapper[4967]: I1121 15:39:58.524905 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2dhwv"
Nov 21 15:39:58 crc kubenswrapper[4967]: I1121 15:39:58.567443 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2dhwv"
Nov 21 15:39:59 crc kubenswrapper[4967]: I1121 15:39:59.059513 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2dhwv"
Nov 21 15:39:59 crc kubenswrapper[4967]: I1121 15:39:59.060289 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-q2vpn"
Nov 21 15:40:00 crc kubenswrapper[4967]: I1121 15:40:00.724736 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-29zhf"
Nov 21 15:40:00 crc kubenswrapper[4967]: I1121 15:40:00.725340 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-29zhf"
Nov 21 15:40:00 crc kubenswrapper[4967]: I1121 15:40:00.782470 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-29zhf"
Nov 21 15:40:00 crc kubenswrapper[4967]: I1121 15:40:00.985726 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tnkcg"
Nov 21 15:40:00 crc kubenswrapper[4967]: I1121 15:40:00.985807 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tnkcg"
Nov 21 15:40:01 crc kubenswrapper[4967]: I1121 15:40:01.071132 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-29zhf"
Nov 21 15:40:02 crc kubenswrapper[4967]: I1121 15:40:02.022642 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tnkcg" podUID="0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25" containerName="registry-server" probeResult="failure" output=<
Nov 21 15:40:02 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s
Nov 21 15:40:02 crc kubenswrapper[4967]: >
Nov 21 15:40:11 crc kubenswrapper[4967]: I1121 15:40:11.023153 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tnkcg"
Nov 21 15:40:11 crc kubenswrapper[4967]: I1121 15:40:11.070056 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tnkcg"
Nov 21 15:40:16 crc kubenswrapper[4967]: I1121 15:40:16.792779 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-64rc8"]
Nov 21 15:40:16 crc kubenswrapper[4967]: I1121 15:40:16.794223 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-64rc8"
Nov 21 15:40:16 crc kubenswrapper[4967]: I1121 15:40:16.796585 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-root-ca.crt"
Nov 21 15:40:16 crc kubenswrapper[4967]: I1121 15:40:16.796720 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-tls"
Nov 21 15:40:16 crc kubenswrapper[4967]: I1121 15:40:16.796869 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"cluster-monitoring-operator-dockercfg-wwt9l"
Nov 21 15:40:16 crc kubenswrapper[4967]: I1121 15:40:16.797751 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"openshift-service-ca.crt"
Nov 21 15:40:16 crc kubenswrapper[4967]: I1121 15:40:16.798243 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"telemetry-config"
Nov 21 15:40:16 crc kubenswrapper[4967]: I1121 15:40:16.815888 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-64rc8"]
Nov 21 15:40:16 crc kubenswrapper[4967]: I1121 15:40:16.816233 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/023c439d-3e6e-43bb-af2f-0cbc2982c355-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-64rc8\" (UID: \"023c439d-3e6e-43bb-af2f-0cbc2982c355\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-64rc8"
Nov 21 15:40:16 crc kubenswrapper[4967]: I1121 15:40:16.816269 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtbw5\" (UniqueName: \"kubernetes.io/projected/023c439d-3e6e-43bb-af2f-0cbc2982c355-kube-api-access-rtbw5\") pod \"cluster-monitoring-operator-6d5b84845-64rc8\" (UID: \"023c439d-3e6e-43bb-af2f-0cbc2982c355\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-64rc8"
Nov 21 15:40:16 crc kubenswrapper[4967]: I1121 15:40:16.816363 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/023c439d-3e6e-43bb-af2f-0cbc2982c355-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-64rc8\" (UID: \"023c439d-3e6e-43bb-af2f-0cbc2982c355\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-64rc8"
Nov 21 15:40:16 crc kubenswrapper[4967]: I1121 15:40:16.917392 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rtbw5\" (UniqueName: \"kubernetes.io/projected/023c439d-3e6e-43bb-af2f-0cbc2982c355-kube-api-access-rtbw5\") pod \"cluster-monitoring-operator-6d5b84845-64rc8\" (UID: \"023c439d-3e6e-43bb-af2f-0cbc2982c355\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-64rc8"
Nov 21 15:40:16 crc kubenswrapper[4967]: I1121 15:40:16.917490 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/023c439d-3e6e-43bb-af2f-0cbc2982c355-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-64rc8\" (UID: \"023c439d-3e6e-43bb-af2f-0cbc2982c355\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-64rc8"
Nov 21 15:40:16 crc kubenswrapper[4967]: I1121 15:40:16.917558 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/023c439d-3e6e-43bb-af2f-0cbc2982c355-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-64rc8\" (UID: \"023c439d-3e6e-43bb-af2f-0cbc2982c355\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-64rc8"
Nov 21 15:40:16 crc kubenswrapper[4967]: I1121 15:40:16.918545 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-config\" (UniqueName: \"kubernetes.io/configmap/023c439d-3e6e-43bb-af2f-0cbc2982c355-telemetry-config\") pod \"cluster-monitoring-operator-6d5b84845-64rc8\" (UID: \"023c439d-3e6e-43bb-af2f-0cbc2982c355\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-64rc8"
Nov 21 15:40:16 crc kubenswrapper[4967]: I1121 15:40:16.927066 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-monitoring-operator-tls\" (UniqueName: \"kubernetes.io/secret/023c439d-3e6e-43bb-af2f-0cbc2982c355-cluster-monitoring-operator-tls\") pod \"cluster-monitoring-operator-6d5b84845-64rc8\" (UID: \"023c439d-3e6e-43bb-af2f-0cbc2982c355\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-64rc8"
Nov 21 15:40:16 crc kubenswrapper[4967]: I1121 15:40:16.949064 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rtbw5\" (UniqueName: \"kubernetes.io/projected/023c439d-3e6e-43bb-af2f-0cbc2982c355-kube-api-access-rtbw5\") pod \"cluster-monitoring-operator-6d5b84845-64rc8\" (UID: \"023c439d-3e6e-43bb-af2f-0cbc2982c355\") " pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-64rc8"
Nov 21 15:40:17 crc kubenswrapper[4967]: I1121 15:40:17.117789 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-64rc8"
Nov 21 15:40:17 crc kubenswrapper[4967]: I1121 15:40:17.300150 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/cluster-monitoring-operator-6d5b84845-64rc8"]
Nov 21 15:40:18 crc kubenswrapper[4967]: I1121 15:40:18.128729 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-64rc8" event={"ID":"023c439d-3e6e-43bb-af2f-0cbc2982c355","Type":"ContainerStarted","Data":"41188a9fa07a937998b6c355acf7b44197a4dc59ac9a3cde840b2efb22bf7e2c"}
Nov 21 15:40:19 crc kubenswrapper[4967]: I1121 15:40:19.998941 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-wwjmz"]
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.000375 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.091861 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-wwjmz"]
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.147147 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ks67"]
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.151989 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-64rc8" event={"ID":"023c439d-3e6e-43bb-af2f-0cbc2982c355","Type":"ContainerStarted","Data":"394db119323b7374f86227290b9337b8b50e6c7ebacc7e436fdfe4a0036fbe9f"}
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.152104 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ks67"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.155336 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ks67"]
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.158053 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-dockercfg-2977n"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.158347 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-admission-webhook-tls"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.170501 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/cluster-monitoring-operator-6d5b84845-64rc8" podStartSLOduration=2.028408129 podStartE2EDuration="4.170478175s" podCreationTimestamp="2025-11-21 15:40:16 +0000 UTC" firstStartedPulling="2025-11-21 15:40:17.312360341 +0000 UTC m=+305.570881349" lastFinishedPulling="2025-11-21 15:40:19.454430387 +0000 UTC m=+307.712951395" observedRunningTime="2025-11-21 15:40:20.167107631 +0000 UTC m=+308.425628639" watchObservedRunningTime="2025-11-21 15:40:20.170478175 +0000 UTC m=+308.428999203"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.173558 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/47d97778-e1bd-4c46-b16f-12b6341b9b3e-bound-sa-token\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.173617 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/47d97778-e1bd-4c46-b16f-12b6341b9b3e-registry-certificates\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.173642 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/47d97778-e1bd-4c46-b16f-12b6341b9b3e-ca-trust-extracted\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.173681 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/47d97778-e1bd-4c46-b16f-12b6341b9b3e-installation-pull-secrets\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.173714 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdc4c\" (UniqueName: \"kubernetes.io/projected/47d97778-e1bd-4c46-b16f-12b6341b9b3e-kube-api-access-qdc4c\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.173736 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/47d97778-e1bd-4c46-b16f-12b6341b9b3e-trusted-ca\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.173769 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.173796 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/47d97778-e1bd-4c46-b16f-12b6341b9b3e-registry-tls\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.210076 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.275770 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/47d97778-e1bd-4c46-b16f-12b6341b9b3e-installation-pull-secrets\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.276395 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdc4c\" (UniqueName: \"kubernetes.io/projected/47d97778-e1bd-4c46-b16f-12b6341b9b3e-kube-api-access-qdc4c\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.276524 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/47d97778-e1bd-4c46-b16f-12b6341b9b3e-trusted-ca\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.276679 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/47d97778-e1bd-4c46-b16f-12b6341b9b3e-registry-tls\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.276805 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/47d97778-e1bd-4c46-b16f-12b6341b9b3e-bound-sa-token\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.276984 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/f29282d3-e44b-4110-8847-88026fbefabb-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-8ks67\" (UID: \"f29282d3-e44b-4110-8847-88026fbefabb\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ks67"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.277126 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/47d97778-e1bd-4c46-b16f-12b6341b9b3e-registry-certificates\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.277267 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/47d97778-e1bd-4c46-b16f-12b6341b9b3e-ca-trust-extracted\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.277993 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/47d97778-e1bd-4c46-b16f-12b6341b9b3e-ca-trust-extracted\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.279162 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/47d97778-e1bd-4c46-b16f-12b6341b9b3e-registry-certificates\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.279402 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/47d97778-e1bd-4c46-b16f-12b6341b9b3e-trusted-ca\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.283689 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/47d97778-e1bd-4c46-b16f-12b6341b9b3e-installation-pull-secrets\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.284304 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/47d97778-e1bd-4c46-b16f-12b6341b9b3e-registry-tls\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.300253 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdc4c\" (UniqueName: \"kubernetes.io/projected/47d97778-e1bd-4c46-b16f-12b6341b9b3e-kube-api-access-qdc4c\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.305110 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/47d97778-e1bd-4c46-b16f-12b6341b9b3e-bound-sa-token\") pod \"image-registry-66df7c8f76-wwjmz\" (UID: \"47d97778-e1bd-4c46-b16f-12b6341b9b3e\") " pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.318388 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.378384 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/f29282d3-e44b-4110-8847-88026fbefabb-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-8ks67\" (UID: \"f29282d3-e44b-4110-8847-88026fbefabb\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ks67"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.382663 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-certificates\" (UniqueName: \"kubernetes.io/secret/f29282d3-e44b-4110-8847-88026fbefabb-tls-certificates\") pod \"prometheus-operator-admission-webhook-f54c54754-8ks67\" (UID: \"f29282d3-e44b-4110-8847-88026fbefabb\") " pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ks67"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.467051 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ks67"
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.543718 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-wwjmz"]
Nov 21 15:40:20 crc kubenswrapper[4967]: W1121 15:40:20.545475 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod47d97778_e1bd_4c46_b16f_12b6341b9b3e.slice/crio-7dc5a4c8104bf6217377af228d8f99ef17a2e4f8fae779f0900ae4a67abeb6f4 WatchSource:0}: Error finding container 7dc5a4c8104bf6217377af228d8f99ef17a2e4f8fae779f0900ae4a67abeb6f4: Status 404 returned error can't find the container with id 7dc5a4c8104bf6217377af228d8f99ef17a2e4f8fae779f0900ae4a67abeb6f4
Nov 21 15:40:20 crc kubenswrapper[4967]: I1121 15:40:20.690309 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ks67"]
Nov 21 15:40:20 crc kubenswrapper[4967]: W1121 15:40:20.696736 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf29282d3_e44b_4110_8847_88026fbefabb.slice/crio-07b61e0f9703860894fa592e147437360f9384a4c16491b751313290a200f07c WatchSource:0}: Error finding container 07b61e0f9703860894fa592e147437360f9384a4c16491b751313290a200f07c: Status 404 returned error can't find the container with id 07b61e0f9703860894fa592e147437360f9384a4c16491b751313290a200f07c
Nov 21 15:40:21 crc kubenswrapper[4967]: I1121 15:40:21.157090 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ks67" event={"ID":"f29282d3-e44b-4110-8847-88026fbefabb","Type":"ContainerStarted","Data":"07b61e0f9703860894fa592e147437360f9384a4c16491b751313290a200f07c"}
Nov 21 15:40:21 crc kubenswrapper[4967]: I1121 15:40:21.159326 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz" event={"ID":"47d97778-e1bd-4c46-b16f-12b6341b9b3e","Type":"ContainerStarted","Data":"886ec11f161a40e1796c79f3215e7da3bf4cc7a5d7769782d79c5df7e26a19ce"}
Nov 21 15:40:21 crc kubenswrapper[4967]: I1121 15:40:21.159358 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz" event={"ID":"47d97778-e1bd-4c46-b16f-12b6341b9b3e","Type":"ContainerStarted","Data":"7dc5a4c8104bf6217377af228d8f99ef17a2e4f8fae779f0900ae4a67abeb6f4"}
Nov 21 15:40:21 crc kubenswrapper[4967]: I1121 15:40:21.159615 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz"
Nov 21 15:40:21 crc kubenswrapper[4967]: I1121 15:40:21.180441 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz" podStartSLOduration=2.18041436 podStartE2EDuration="2.18041436s" podCreationTimestamp="2025-11-21 15:40:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:40:21.179677408 +0000 UTC m=+309.438198436" watchObservedRunningTime="2025-11-21 15:40:21.18041436 +0000 UTC m=+309.438935368"
Nov 21 15:40:23 crc kubenswrapper[4967]: I1121 15:40:23.175016 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ks67" event={"ID":"f29282d3-e44b-4110-8847-88026fbefabb","Type":"ContainerStarted","Data":"153f2e5c725f536de727d66890f30c16067f21e3db7aff29be06b0ffb0f06350"}
Nov 21 15:40:23 crc kubenswrapper[4967]: I1121 15:40:23.175423 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ks67"
Nov 21 15:40:23 crc kubenswrapper[4967]: I1121 15:40:23.181675 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ks67"
Nov 21 15:40:23 crc kubenswrapper[4967]: I1121 15:40:23.192116 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-admission-webhook-f54c54754-8ks67" podStartSLOduration=1.8131308750000001 podStartE2EDuration="3.192093879s" podCreationTimestamp="2025-11-21 15:40:20 +0000 UTC" firstStartedPulling="2025-11-21 15:40:20.699175616 +0000 UTC m=+308.957696624" lastFinishedPulling="2025-11-21 15:40:22.07813862 +0000 UTC m=+310.336659628" observedRunningTime="2025-11-21 15:40:23.18919421 +0000 UTC m=+311.447715238" watchObservedRunningTime="2025-11-21 15:40:23.192093879 +0000 UTC m=+311.450614887"
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.220119 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-dd9zs"]
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.221916 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-dd9zs"
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.224534 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-kube-rbac-proxy-config"
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.224596 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-dockercfg-twt5j"
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.225665 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-operator-tls"
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.226983 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-client-ca"
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.239911 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-dd9zs"]
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.338806 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b55731e0-f983-4df9-b260-ffe2c0326a14-metrics-client-ca\") pod \"prometheus-operator-db54df47d-dd9zs\" (UID: \"b55731e0-f983-4df9-b260-ffe2c0326a14\") " pod="openshift-monitoring/prometheus-operator-db54df47d-dd9zs"
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.338906 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwgf7\" (UniqueName: \"kubernetes.io/projected/b55731e0-f983-4df9-b260-ffe2c0326a14-kube-api-access-xwgf7\") pod \"prometheus-operator-db54df47d-dd9zs\" (UID: \"b55731e0-f983-4df9-b260-ffe2c0326a14\") " pod="openshift-monitoring/prometheus-operator-db54df47d-dd9zs"
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.338966 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/b55731e0-f983-4df9-b260-ffe2c0326a14-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-dd9zs\" (UID: \"b55731e0-f983-4df9-b260-ffe2c0326a14\") " pod="openshift-monitoring/prometheus-operator-db54df47d-dd9zs"
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.339049 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/b55731e0-f983-4df9-b260-ffe2c0326a14-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-dd9zs\" (UID: \"b55731e0-f983-4df9-b260-ffe2c0326a14\") " pod="openshift-monitoring/prometheus-operator-db54df47d-dd9zs"
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.440444 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b55731e0-f983-4df9-b260-ffe2c0326a14-metrics-client-ca\") pod \"prometheus-operator-db54df47d-dd9zs\" (UID: \"b55731e0-f983-4df9-b260-ffe2c0326a14\") " pod="openshift-monitoring/prometheus-operator-db54df47d-dd9zs"
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.440513 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwgf7\" (UniqueName: \"kubernetes.io/projected/b55731e0-f983-4df9-b260-ffe2c0326a14-kube-api-access-xwgf7\") pod \"prometheus-operator-db54df47d-dd9zs\" (UID: \"b55731e0-f983-4df9-b260-ffe2c0326a14\") " pod="openshift-monitoring/prometheus-operator-db54df47d-dd9zs"
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.440566 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/b55731e0-f983-4df9-b260-ffe2c0326a14-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-dd9zs\" (UID: \"b55731e0-f983-4df9-b260-ffe2c0326a14\") " pod="openshift-monitoring/prometheus-operator-db54df47d-dd9zs"
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.440605 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/b55731e0-f983-4df9-b260-ffe2c0326a14-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-dd9zs\" (UID: \"b55731e0-f983-4df9-b260-ffe2c0326a14\") " pod="openshift-monitoring/prometheus-operator-db54df47d-dd9zs"
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.441448 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/b55731e0-f983-4df9-b260-ffe2c0326a14-metrics-client-ca\") pod \"prometheus-operator-db54df47d-dd9zs\" (UID: \"b55731e0-f983-4df9-b260-ffe2c0326a14\") " pod="openshift-monitoring/prometheus-operator-db54df47d-dd9zs"
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.447510 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/b55731e0-f983-4df9-b260-ffe2c0326a14-prometheus-operator-kube-rbac-proxy-config\") pod \"prometheus-operator-db54df47d-dd9zs\" (UID: \"b55731e0-f983-4df9-b260-ffe2c0326a14\") " pod="openshift-monitoring/prometheus-operator-db54df47d-dd9zs"
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.448067 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-operator-tls\" (UniqueName: \"kubernetes.io/secret/b55731e0-f983-4df9-b260-ffe2c0326a14-prometheus-operator-tls\") pod \"prometheus-operator-db54df47d-dd9zs\" (UID: \"b55731e0-f983-4df9-b260-ffe2c0326a14\") " pod="openshift-monitoring/prometheus-operator-db54df47d-dd9zs"
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.475237 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwgf7\" (UniqueName: \"kubernetes.io/projected/b55731e0-f983-4df9-b260-ffe2c0326a14-kube-api-access-xwgf7\") pod \"prometheus-operator-db54df47d-dd9zs\" (UID: \"b55731e0-f983-4df9-b260-ffe2c0326a14\") " pod="openshift-monitoring/prometheus-operator-db54df47d-dd9zs"
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.537045 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-operator-db54df47d-dd9zs"
Nov 21 15:40:24 crc kubenswrapper[4967]: I1121 15:40:24.985229 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-operator-db54df47d-dd9zs"]
Nov 21 15:40:25 crc kubenswrapper[4967]: I1121 15:40:25.187640 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-dd9zs" event={"ID":"b55731e0-f983-4df9-b260-ffe2c0326a14","Type":"ContainerStarted","Data":"a83a0267a4261c961b1675a1edcbda2975f1f7a8cce4a0a89e5e7219c40b0fa9"}
Nov 21 15:40:27 crc kubenswrapper[4967]: I1121 15:40:27.200288 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-dd9zs" event={"ID":"b55731e0-f983-4df9-b260-ffe2c0326a14","Type":"ContainerStarted","Data":"38c78aa1a99c20e577bb68ac0af6a8b163c3aba8a9d616abc385c07388e40c88"}
Nov 21 15:40:27 crc kubenswrapper[4967]: I1121 15:40:27.200406 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-operator-db54df47d-dd9zs" event={"ID":"b55731e0-f983-4df9-b260-ffe2c0326a14","Type":"ContainerStarted","Data":"24f6e5f67a3858a9aaa0cdc44e3108075ba4802b18d1aeaffe157ee2a7bbc37f"}
Nov 21 15:40:27 crc kubenswrapper[4967]: I1121 15:40:27.215403 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-operator-db54df47d-dd9zs" podStartSLOduration=1.568907866 podStartE2EDuration="3.215384464s" podCreationTimestamp="2025-11-21 15:40:24 +0000 UTC" firstStartedPulling="2025-11-21 15:40:24.99666177 +0000 UTC m=+313.255182768" lastFinishedPulling="2025-11-21 15:40:26.643138358 +0000 UTC m=+314.901659366" observedRunningTime="2025-11-21 15:40:27.214259689 +0000 UTC m=+315.472780687" watchObservedRunningTime="2025-11-21 15:40:27.215384464 +0000 UTC m=+315.473905462"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.540842 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t"]
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.542323 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.544868 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-tls"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.545120 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-kube-rbac-proxy-config"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.545373 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"openshift-state-metrics-dockercfg-8nvqf"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.563858 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t"]
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.567756 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms"]
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.569249 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.571146 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kube-state-metrics-custom-resource-state-configmap"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.571730 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-kube-rbac-proxy-config"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.571907 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/node-exporter-hz2wr"]
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.571990 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-tls"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.573260 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/node-exporter-hz2wr"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.576746 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-tls"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.576779 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-dockercfg-lb6jg"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.577107 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-state-metrics-dockercfg-cb4qx"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.577143 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"node-exporter-kube-rbac-proxy-config"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.598173 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms"]
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.615265 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ff8m4\" (UniqueName: \"kubernetes.io/projected/2dab0bd8-61f0-4542-af7c-8a13c90f0535-kube-api-access-ff8m4\") pod \"openshift-state-metrics-566fddb674-gwx8t\" (UID: \"2dab0bd8-61f0-4542-af7c-8a13c90f0535\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.615385 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/2dab0bd8-61f0-4542-af7c-8a13c90f0535-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-gwx8t\" (UID: \"2dab0bd8-61f0-4542-af7c-8a13c90f0535\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.615424 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/2dab0bd8-61f0-4542-af7c-8a13c90f0535-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-gwx8t\" (UID: \"2dab0bd8-61f0-4542-af7c-8a13c90f0535\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.615448 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/2dab0bd8-61f0-4542-af7c-8a13c90f0535-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-gwx8t\" (UID: \"2dab0bd8-61f0-4542-af7c-8a13c90f0535\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.716833 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-metrics-client-ca\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.716891 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ff8m4\" (UniqueName: \"kubernetes.io/projected/2dab0bd8-61f0-4542-af7c-8a13c90f0535-kube-api-access-ff8m4\") pod \"openshift-state-metrics-566fddb674-gwx8t\" (UID: \"2dab0bd8-61f0-4542-af7c-8a13c90f0535\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.716916 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-node-exporter-tls\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.716942 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-root\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.716966 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-node-exporter-wtmp\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.716990 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/723b468f-afbb-4657-8215-cdf269162144-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-vtjms\" (UID: \"723b468f-afbb-4657-8215-cdf269162144\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.717009 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-node-exporter-textfile\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.717027 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvfww\" (UniqueName: \"kubernetes.io/projected/723b468f-afbb-4657-8215-cdf269162144-kube-api-access-jvfww\") pod \"kube-state-metrics-777cb5bd5d-vtjms\" (UID: \"723b468f-afbb-4657-8215-cdf269162144\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.717045 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-sys\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.717062 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/723b468f-afbb-4657-8215-cdf269162144-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-vtjms\" (UID: \"723b468f-afbb-4657-8215-cdf269162144\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.717081 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/2dab0bd8-61f0-4542-af7c-8a13c90f0535-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-gwx8t\" (UID: \"2dab0bd8-61f0-4542-af7c-8a13c90f0535\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.717112 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwlzz\" (UniqueName: \"kubernetes.io/projected/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-kube-api-access-cwlzz\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.717132 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/2dab0bd8-61f0-4542-af7c-8a13c90f0535-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-gwx8t\" (UID: \"2dab0bd8-61f0-4542-af7c-8a13c90f0535\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.717152 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/2dab0bd8-61f0-4542-af7c-8a13c90f0535-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-gwx8t\" (UID: \"2dab0bd8-61f0-4542-af7c-8a13c90f0535\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.717170 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.717185 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/723b468f-afbb-4657-8215-cdf269162144-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-vtjms\" (UID: \"723b468f-afbb-4657-8215-cdf269162144\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.717207 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/723b468f-afbb-4657-8215-cdf269162144-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-vtjms\" (UID: \"723b468f-afbb-4657-8215-cdf269162144\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.717223 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/723b468f-afbb-4657-8215-cdf269162144-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-vtjms\" (UID: \"723b468f-afbb-4657-8215-cdf269162144\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.718331 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/2dab0bd8-61f0-4542-af7c-8a13c90f0535-metrics-client-ca\") pod \"openshift-state-metrics-566fddb674-gwx8t\" (UID: \"2dab0bd8-61f0-4542-af7c-8a13c90f0535\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t"
Nov 21 15:40:29 crc kubenswrapper[4967]: E1121 15:40:29.718407 4967 secret.go:188] Couldn't get secret openshift-monitoring/openshift-state-metrics-tls: secret "openshift-state-metrics-tls" not found
Nov 21 15:40:29 crc kubenswrapper[4967]: E1121 15:40:29.718450 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2dab0bd8-61f0-4542-af7c-8a13c90f0535-openshift-state-metrics-tls podName:2dab0bd8-61f0-4542-af7c-8a13c90f0535 nodeName:}" failed. No retries permitted until 2025-11-21 15:40:30.21843711 +0000 UTC m=+318.476958108 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "openshift-state-metrics-tls" (UniqueName: "kubernetes.io/secret/2dab0bd8-61f0-4542-af7c-8a13c90f0535-openshift-state-metrics-tls") pod "openshift-state-metrics-566fddb674-gwx8t" (UID: "2dab0bd8-61f0-4542-af7c-8a13c90f0535") : secret "openshift-state-metrics-tls" not found
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.734739 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ff8m4\" (UniqueName: \"kubernetes.io/projected/2dab0bd8-61f0-4542-af7c-8a13c90f0535-kube-api-access-ff8m4\") pod \"openshift-state-metrics-566fddb674-gwx8t\" (UID: \"2dab0bd8-61f0-4542-af7c-8a13c90f0535\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.737839 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/2dab0bd8-61f0-4542-af7c-8a13c90f0535-openshift-state-metrics-kube-rbac-proxy-config\") pod \"openshift-state-metrics-566fddb674-gwx8t\" (UID: \"2dab0bd8-61f0-4542-af7c-8a13c90f0535\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.818288 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/723b468f-afbb-4657-8215-cdf269162144-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-vtjms\" (UID: \"723b468f-afbb-4657-8215-cdf269162144\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.818654 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/723b468f-afbb-4657-8215-cdf269162144-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-vtjms\" (UID: \"723b468f-afbb-4657-8215-cdf269162144\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.818715 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-metrics-client-ca\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.818750 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-node-exporter-tls\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.818778 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"root\" (UniqueName: \"kubernetes.io/host-path/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-root\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.818809 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-node-exporter-wtmp\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.818843 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/723b468f-afbb-4657-8215-cdf269162144-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-vtjms\" (UID: \"723b468f-afbb-4657-8215-cdf269162144\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.818908 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-node-exporter-textfile\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.818901 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"root\" (UniqueName: \"kubernetes.io/host-path/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-root\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.819017 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-wtmp\" (UniqueName: \"kubernetes.io/host-path/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-node-exporter-wtmp\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.819484 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-textfile\" (UniqueName: \"kubernetes.io/empty-dir/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-node-exporter-textfile\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr"
Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.819586 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"volume-directive-shadow\" (UniqueName: \"kubernetes.io/empty-dir/723b468f-afbb-4657-8215-cdf269162144-volume-directive-shadow\") pod \"kube-state-metrics-777cb5bd5d-vtjms\" (UID: \"723b468f-afbb-4657-8215-cdf269162144\") " 
pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms" Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.819610 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-metrics-client-ca\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr" Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.819923 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-custom-resource-state-configmap\" (UniqueName: \"kubernetes.io/configmap/723b468f-afbb-4657-8215-cdf269162144-kube-state-metrics-custom-resource-state-configmap\") pod \"kube-state-metrics-777cb5bd5d-vtjms\" (UID: \"723b468f-afbb-4657-8215-cdf269162144\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms" Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.819986 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvfww\" (UniqueName: \"kubernetes.io/projected/723b468f-afbb-4657-8215-cdf269162144-kube-api-access-jvfww\") pod \"kube-state-metrics-777cb5bd5d-vtjms\" (UID: \"723b468f-afbb-4657-8215-cdf269162144\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms" Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.820020 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-sys\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr" Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.820042 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/723b468f-afbb-4657-8215-cdf269162144-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-vtjms\" (UID: \"723b468f-afbb-4657-8215-cdf269162144\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms" Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.820087 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwlzz\" (UniqueName: \"kubernetes.io/projected/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-kube-api-access-cwlzz\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr" Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.820132 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr" Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.820155 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/723b468f-afbb-4657-8215-cdf269162144-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-vtjms\" (UID: \"723b468f-afbb-4657-8215-cdf269162144\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms" Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.821417 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: 
\"kubernetes.io/configmap/723b468f-afbb-4657-8215-cdf269162144-metrics-client-ca\") pod \"kube-state-metrics-777cb5bd5d-vtjms\" (UID: \"723b468f-afbb-4657-8215-cdf269162144\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms" Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.821705 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/723b468f-afbb-4657-8215-cdf269162144-kube-state-metrics-kube-rbac-proxy-config\") pod \"kube-state-metrics-777cb5bd5d-vtjms\" (UID: \"723b468f-afbb-4657-8215-cdf269162144\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms" Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.821777 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-sys\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr" Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.822177 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-tls\" (UniqueName: \"kubernetes.io/secret/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-node-exporter-tls\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr" Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.824057 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-exporter-kube-rbac-proxy-config\" (UniqueName: \"kubernetes.io/secret/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-node-exporter-kube-rbac-proxy-config\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr" Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.832020 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/723b468f-afbb-4657-8215-cdf269162144-kube-state-metrics-tls\") pod \"kube-state-metrics-777cb5bd5d-vtjms\" (UID: \"723b468f-afbb-4657-8215-cdf269162144\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms" Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.838071 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwlzz\" (UniqueName: \"kubernetes.io/projected/bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4-kube-api-access-cwlzz\") pod \"node-exporter-hz2wr\" (UID: \"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4\") " pod="openshift-monitoring/node-exporter-hz2wr" Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.838455 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvfww\" (UniqueName: \"kubernetes.io/projected/723b468f-afbb-4657-8215-cdf269162144-kube-api-access-jvfww\") pod \"kube-state-metrics-777cb5bd5d-vtjms\" (UID: \"723b468f-afbb-4657-8215-cdf269162144\") " pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms" Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.901069 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms" Nov 21 15:40:29 crc kubenswrapper[4967]: I1121 15:40:29.908462 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/node-exporter-hz2wr" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.122550 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms"] Nov 21 15:40:30 crc kubenswrapper[4967]: W1121 15:40:30.127040 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod723b468f_afbb_4657_8215_cdf269162144.slice/crio-4a6807d6cf871133a8504828f0a3578002a87c3db50d368af1aa97145312012c WatchSource:0}: Error finding container 4a6807d6cf871133a8504828f0a3578002a87c3db50d368af1aa97145312012c: Status 404 returned error can't find the container with id 4a6807d6cf871133a8504828f0a3578002a87c3db50d368af1aa97145312012c Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.215595 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-hz2wr" event={"ID":"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4","Type":"ContainerStarted","Data":"580b367ac170886564f9b9f067f2a28eecf32325e2e378e7dfce9d298f003dbe"} Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.216791 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms" event={"ID":"723b468f-afbb-4657-8215-cdf269162144","Type":"ContainerStarted","Data":"4a6807d6cf871133a8504828f0a3578002a87c3db50d368af1aa97145312012c"} Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.226189 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/2dab0bd8-61f0-4542-af7c-8a13c90f0535-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-gwx8t\" (UID: \"2dab0bd8-61f0-4542-af7c-8a13c90f0535\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.229963 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-state-metrics-tls\" (UniqueName: \"kubernetes.io/secret/2dab0bd8-61f0-4542-af7c-8a13c90f0535-openshift-state-metrics-tls\") pod \"openshift-state-metrics-566fddb674-gwx8t\" (UID: \"2dab0bd8-61f0-4542-af7c-8a13c90f0535\") " pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.459398 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.683705 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.688596 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.697912 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-metric" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.698189 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy-web" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.698384 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.698533 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-generated" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.698667 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-tls-assets-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.705058 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-web-config" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.705106 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-main-dockercfg-n6pgd" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.705303 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"alertmanager-kube-rbac-proxy" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.709608 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"alertmanager-trusted-ca-bundle" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.712211 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.771799 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t"] Nov 21 15:40:30 crc kubenswrapper[4967]: W1121 15:40:30.794671 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2dab0bd8_61f0_4542_af7c_8a13c90f0535.slice/crio-7a0d75536728f4400b11aea1f1f88a489e0d65306953030dbef2db3248974211 WatchSource:0}: Error finding container 7a0d75536728f4400b11aea1f1f88a489e0d65306953030dbef2db3248974211: Status 404 returned error can't find the container with id 7a0d75536728f4400b11aea1f1f88a489e0d65306953030dbef2db3248974211 Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.834499 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/4aaa3781-4245-4156-afb2-f2756e0b2c4c-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.834566 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/4aaa3781-4245-4156-afb2-f2756e0b2c4c-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.834591 
4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/4aaa3781-4245-4156-afb2-f2756e0b2c4c-web-config\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.834617 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/4aaa3781-4245-4156-afb2-f2756e0b2c4c-config-out\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.834637 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/4aaa3781-4245-4156-afb2-f2756e0b2c4c-config-volume\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.834660 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/4aaa3781-4245-4156-afb2-f2756e0b2c4c-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.834677 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/4aaa3781-4245-4156-afb2-f2756e0b2c4c-tls-assets\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.834697 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pr8s4\" (UniqueName: \"kubernetes.io/projected/4aaa3781-4245-4156-afb2-f2756e0b2c4c-kube-api-access-pr8s4\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.834717 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4aaa3781-4245-4156-afb2-f2756e0b2c4c-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.834740 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/4aaa3781-4245-4156-afb2-f2756e0b2c4c-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.834762 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/4aaa3781-4245-4156-afb2-f2756e0b2c4c-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" 
(UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.834791 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/4aaa3781-4245-4156-afb2-f2756e0b2c4c-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.941681 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/4aaa3781-4245-4156-afb2-f2756e0b2c4c-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.942103 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/4aaa3781-4245-4156-afb2-f2756e0b2c4c-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.942131 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/4aaa3781-4245-4156-afb2-f2756e0b2c4c-web-config\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.942176 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/4aaa3781-4245-4156-afb2-f2756e0b2c4c-config-out\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.942199 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/4aaa3781-4245-4156-afb2-f2756e0b2c4c-config-volume\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.942227 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/4aaa3781-4245-4156-afb2-f2756e0b2c4c-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.942247 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/4aaa3781-4245-4156-afb2-f2756e0b2c4c-tls-assets\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.942272 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pr8s4\" (UniqueName: \"kubernetes.io/projected/4aaa3781-4245-4156-afb2-f2756e0b2c4c-kube-api-access-pr8s4\") pod \"alertmanager-main-0\" (UID: 
\"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.942297 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4aaa3781-4245-4156-afb2-f2756e0b2c4c-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.942337 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/4aaa3781-4245-4156-afb2-f2756e0b2c4c-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.942496 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/4aaa3781-4245-4156-afb2-f2756e0b2c4c-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.942546 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/4aaa3781-4245-4156-afb2-f2756e0b2c4c-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.945170 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/4aaa3781-4245-4156-afb2-f2756e0b2c4c-metrics-client-ca\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.948204 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4aaa3781-4245-4156-afb2-f2756e0b2c4c-alertmanager-trusted-ca-bundle\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.949674 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-main-db\" (UniqueName: \"kubernetes.io/empty-dir/4aaa3781-4245-4156-afb2-f2756e0b2c4c-alertmanager-main-db\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.950109 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/4aaa3781-4245-4156-afb2-f2756e0b2c4c-secret-alertmanager-kube-rbac-proxy-web\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.951271 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: 
\"kubernetes.io/secret/4aaa3781-4245-4156-afb2-f2756e0b2c4c-web-config\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.952224 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/4aaa3781-4245-4156-afb2-f2756e0b2c4c-config-out\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.953212 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/4aaa3781-4245-4156-afb2-f2756e0b2c4c-secret-alertmanager-kube-rbac-proxy\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.954954 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/4aaa3781-4245-4156-afb2-f2756e0b2c4c-tls-assets\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.956282 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-kube-rbac-proxy-metric\" (UniqueName: \"kubernetes.io/secret/4aaa3781-4245-4156-afb2-f2756e0b2c4c-secret-alertmanager-kube-rbac-proxy-metric\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.963689 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pr8s4\" (UniqueName: \"kubernetes.io/projected/4aaa3781-4245-4156-afb2-f2756e0b2c4c-kube-api-access-pr8s4\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.966667 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/4aaa3781-4245-4156-afb2-f2756e0b2c4c-config-volume\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:30 crc kubenswrapper[4967]: I1121 15:40:30.973776 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-alertmanager-main-tls\" (UniqueName: \"kubernetes.io/secret/4aaa3781-4245-4156-afb2-f2756e0b2c4c-secret-alertmanager-main-tls\") pod \"alertmanager-main-0\" (UID: \"4aaa3781-4245-4156-afb2-f2756e0b2c4c\") " pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.015636 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/alertmanager-main-0" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.232494 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t" event={"ID":"2dab0bd8-61f0-4542-af7c-8a13c90f0535","Type":"ContainerStarted","Data":"508ee04002e7eb71cf2fc8455cdfd17435ef1b8d592265a73041222a9d634cc7"} Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.232844 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t" event={"ID":"2dab0bd8-61f0-4542-af7c-8a13c90f0535","Type":"ContainerStarted","Data":"a8c420007a80a74927455864f4a16fcc9163190ce00c617390af5150ae4ddde1"} Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.232859 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t" event={"ID":"2dab0bd8-61f0-4542-af7c-8a13c90f0535","Type":"ContainerStarted","Data":"7a0d75536728f4400b11aea1f1f88a489e0d65306953030dbef2db3248974211"} Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.272698 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/alertmanager-main-0"] Nov 21 15:40:31 crc kubenswrapper[4967]: W1121 15:40:31.482979 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4aaa3781_4245_4156_afb2_f2756e0b2c4c.slice/crio-3b448f685ef137d912eef3b734cf5c6943b66bc4c31061baa10b508e9268456d WatchSource:0}: Error finding container 3b448f685ef137d912eef3b734cf5c6943b66bc4c31061baa10b508e9268456d: Status 404 returned error can't find the container with id 3b448f685ef137d912eef3b734cf5c6943b66bc4c31061baa10b508e9268456d Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.661175 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/thanos-querier-c9f77484d-pkskm"] Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.664091 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.667092 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-tls" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.667442 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-web" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.667628 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-grpc-tls-7nt2upch0tqfs" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.667911 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-metrics" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.668159 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy-rules" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.668297 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-kube-rbac-proxy" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.669080 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"thanos-querier-dockercfg-lzhjz" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.706455 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-c9f77484d-pkskm"] Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.754260 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.754373 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.754423 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-secret-grpc-tls\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.754485 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmjr2\" (UniqueName: \"kubernetes.io/projected/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-kube-api-access-bmjr2\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.754515 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-metrics-client-ca\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.754560 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.754597 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-secret-thanos-querier-tls\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.754660 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.856636 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.856800 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.856838 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.856880 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-secret-grpc-tls\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.856917 4967 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-bmjr2\" (UniqueName: \"kubernetes.io/projected/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-kube-api-access-bmjr2\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.856952 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-metrics-client-ca\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.857021 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.857084 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-thanos-querier-tls\" (UniqueName: \"kubernetes.io/secret/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-secret-thanos-querier-tls\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.859969 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-metrics-client-ca\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.861248 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-rules\" (UniqueName: \"kubernetes.io/secret/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-secret-thanos-querier-kube-rbac-proxy-rules\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.861636 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-secret-grpc-tls\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.862286 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-metrics\" (UniqueName: \"kubernetes.io/secret/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-secret-thanos-querier-kube-rbac-proxy-metrics\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.862445 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-tls\" (UniqueName: 
\"kubernetes.io/secret/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-secret-thanos-querier-tls\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.862763 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-secret-thanos-querier-kube-rbac-proxy\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.863123 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-thanos-querier-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-secret-thanos-querier-kube-rbac-proxy-web\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.874578 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmjr2\" (UniqueName: \"kubernetes.io/projected/4ea00e49-25fa-40a7-af58-d5aa5a8b9301-kube-api-access-bmjr2\") pod \"thanos-querier-c9f77484d-pkskm\" (UID: \"4ea00e49-25fa-40a7-af58-d5aa5a8b9301\") " pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:31 crc kubenswrapper[4967]: I1121 15:40:31.994190 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:32 crc kubenswrapper[4967]: I1121 15:40:32.245641 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-hz2wr" event={"ID":"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4","Type":"ContainerStarted","Data":"86566d174f0c1836752b2b08cdeed7c3d8813bd6ae1e58d74096c8c01bb6b48c"} Nov 21 15:40:32 crc kubenswrapper[4967]: I1121 15:40:32.247832 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"4aaa3781-4245-4156-afb2-f2756e0b2c4c","Type":"ContainerStarted","Data":"3b448f685ef137d912eef3b734cf5c6943b66bc4c31061baa10b508e9268456d"} Nov 21 15:40:32 crc kubenswrapper[4967]: I1121 15:40:32.249262 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/thanos-querier-c9f77484d-pkskm"] Nov 21 15:40:32 crc kubenswrapper[4967]: I1121 15:40:32.257408 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms" event={"ID":"723b468f-afbb-4657-8215-cdf269162144","Type":"ContainerStarted","Data":"26edcf81e49e9f915451083068d44bdc3ebd0b5b18b09cbbf19a8c18c07ee02a"} Nov 21 15:40:32 crc kubenswrapper[4967]: W1121 15:40:32.268443 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4ea00e49_25fa_40a7_af58_d5aa5a8b9301.slice/crio-38b7cb45ac1c93da70f88463ba005037422de78fbe119aefed22a618405c27a7 WatchSource:0}: Error finding container 38b7cb45ac1c93da70f88463ba005037422de78fbe119aefed22a618405c27a7: Status 404 returned error can't find the container with id 38b7cb45ac1c93da70f88463ba005037422de78fbe119aefed22a618405c27a7 Nov 21 15:40:33 crc kubenswrapper[4967]: I1121 15:40:33.266570 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" event={"ID":"4ea00e49-25fa-40a7-af58-d5aa5a8b9301","Type":"ContainerStarted","Data":"38b7cb45ac1c93da70f88463ba005037422de78fbe119aefed22a618405c27a7"} Nov 21 15:40:33 crc kubenswrapper[4967]: I1121 15:40:33.269626 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms" event={"ID":"723b468f-afbb-4657-8215-cdf269162144","Type":"ContainerStarted","Data":"21934fcfa7bde71fb4c9de80ef9984d73ceb843357266e23be176a43a551cfdd"} Nov 21 15:40:33 crc kubenswrapper[4967]: I1121 15:40:33.269739 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms" event={"ID":"723b468f-afbb-4657-8215-cdf269162144","Type":"ContainerStarted","Data":"9986e94ef0305d77121642379b971731abe4cd732713b620f9d8ed73d80b58da"} Nov 21 15:40:33 crc kubenswrapper[4967]: I1121 15:40:33.271230 4967 generic.go:334] "Generic (PLEG): container finished" podID="bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4" containerID="86566d174f0c1836752b2b08cdeed7c3d8813bd6ae1e58d74096c8c01bb6b48c" exitCode=0 Nov 21 15:40:33 crc kubenswrapper[4967]: I1121 15:40:33.271268 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-hz2wr" event={"ID":"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4","Type":"ContainerDied","Data":"86566d174f0c1836752b2b08cdeed7c3d8813bd6ae1e58d74096c8c01bb6b48c"} Nov 21 15:40:33 crc kubenswrapper[4967]: I1121 15:40:33.294453 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/kube-state-metrics-777cb5bd5d-vtjms" podStartSLOduration=2.478641482 podStartE2EDuration="4.29442743s" podCreationTimestamp="2025-11-21 15:40:29 +0000 UTC" firstStartedPulling="2025-11-21 15:40:30.129204189 +0000 UTC m=+318.387725197" lastFinishedPulling="2025-11-21 15:40:31.944990137 +0000 UTC m=+320.203511145" observedRunningTime="2025-11-21 15:40:33.291717956 +0000 UTC m=+321.550238974" watchObservedRunningTime="2025-11-21 15:40:33.29442743 +0000 UTC m=+321.552948438" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.281474 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t" event={"ID":"2dab0bd8-61f0-4542-af7c-8a13c90f0535","Type":"ContainerStarted","Data":"25c1eeb9d4a7c1ef8deb81dfa97fa1fde766ca3c9283a619e9cfacfebc6bec54"} Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.284445 4967 generic.go:334] "Generic (PLEG): container finished" podID="4aaa3781-4245-4156-afb2-f2756e0b2c4c" containerID="d3a4e8efa446ec7a61955beaf882816cb161fbdf342692c5d94b76dd074c8048" exitCode=0 Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.284507 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"4aaa3781-4245-4156-afb2-f2756e0b2c4c","Type":"ContainerDied","Data":"d3a4e8efa446ec7a61955beaf882816cb161fbdf342692c5d94b76dd074c8048"} Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.289968 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-hz2wr" event={"ID":"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4","Type":"ContainerStarted","Data":"ab8e01f153955ed1f0660ed33adf1e8b47050a2f08fab95e175d26d25a46abf8"} Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.290014 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/node-exporter-hz2wr" 
event={"ID":"bcc78713-90c1-4dd2-9ca0-3a6e3cab12a4","Type":"ContainerStarted","Data":"f467508cf050d024aeb97e09e4b1c9e33dc3f73f675cc2b5a16b9093bc2d1562"} Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.299836 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/openshift-state-metrics-566fddb674-gwx8t" podStartSLOduration=3.041747266 podStartE2EDuration="5.299810265s" podCreationTimestamp="2025-11-21 15:40:29 +0000 UTC" firstStartedPulling="2025-11-21 15:40:31.132740188 +0000 UTC m=+319.391261206" lastFinishedPulling="2025-11-21 15:40:33.390803206 +0000 UTC m=+321.649324205" observedRunningTime="2025-11-21 15:40:34.299259598 +0000 UTC m=+322.557780646" watchObservedRunningTime="2025-11-21 15:40:34.299810265 +0000 UTC m=+322.558331283" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.420601 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/node-exporter-hz2wr" podStartSLOduration=3.414007004 podStartE2EDuration="5.420580855s" podCreationTimestamp="2025-11-21 15:40:29 +0000 UTC" firstStartedPulling="2025-11-21 15:40:29.934849685 +0000 UTC m=+318.193370693" lastFinishedPulling="2025-11-21 15:40:31.941423536 +0000 UTC m=+320.199944544" observedRunningTime="2025-11-21 15:40:34.371138798 +0000 UTC m=+322.629659816" watchObservedRunningTime="2025-11-21 15:40:34.420580855 +0000 UTC m=+322.679101853" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.423541 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-7466f98554-gfklj"] Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.424585 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.449739 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7466f98554-gfklj"] Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.500386 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-trusted-ca-bundle\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.500468 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/71c2ce0c-917f-413a-a66a-ea2eb1a18318-console-serving-cert\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.500499 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/71c2ce0c-917f-413a-a66a-ea2eb1a18318-console-oauth-config\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.500537 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-service-ca\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " 
pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.500587 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-oauth-serving-cert\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.500616 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ffc5\" (UniqueName: \"kubernetes.io/projected/71c2ce0c-917f-413a-a66a-ea2eb1a18318-kube-api-access-4ffc5\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.500638 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-console-config\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.602649 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-oauth-serving-cert\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.602733 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ffc5\" (UniqueName: \"kubernetes.io/projected/71c2ce0c-917f-413a-a66a-ea2eb1a18318-kube-api-access-4ffc5\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.602765 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-console-config\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.602827 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-trusted-ca-bundle\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.602849 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/71c2ce0c-917f-413a-a66a-ea2eb1a18318-console-serving-cert\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.602878 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/71c2ce0c-917f-413a-a66a-ea2eb1a18318-console-oauth-config\") pod 
\"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.602920 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-service-ca\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.603692 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-oauth-serving-cert\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.603994 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-service-ca\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.604641 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-console-config\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.605201 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-trusted-ca-bundle\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.608148 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/71c2ce0c-917f-413a-a66a-ea2eb1a18318-console-serving-cert\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.610986 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/71c2ce0c-917f-413a-a66a-ea2eb1a18318-console-oauth-config\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.619973 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ffc5\" (UniqueName: \"kubernetes.io/projected/71c2ce0c-917f-413a-a66a-ea2eb1a18318-kube-api-access-4ffc5\") pod \"console-7466f98554-gfklj\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.745531 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.863918 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/metrics-server-847cb5969b-kjnfg"] Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.865242 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.867213 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-client-certs" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.867940 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-tls" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.868117 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-dockercfg-prxgp" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.868240 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"kubelet-serving-ca-bundle" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.868373 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"metrics-server-audit-profiles" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.868483 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"metrics-server-2uhjh94nbhfpc" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.873501 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-847cb5969b-kjnfg"] Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.909513 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/4ab45ac8-999f-4507-b98c-7897ab89cba8-audit-log\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.909567 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/4ab45ac8-999f-4507-b98c-7897ab89cba8-secret-metrics-server-tls\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.909600 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/4ab45ac8-999f-4507-b98c-7897ab89cba8-metrics-server-audit-profiles\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.909629 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4ab45ac8-999f-4507-b98c-7897ab89cba8-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 
15:40:34.909684 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4mgl\" (UniqueName: \"kubernetes.io/projected/4ab45ac8-999f-4507-b98c-7897ab89cba8-kube-api-access-g4mgl\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.909717 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ab45ac8-999f-4507-b98c-7897ab89cba8-client-ca-bundle\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:34 crc kubenswrapper[4967]: I1121 15:40:34.909753 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/4ab45ac8-999f-4507-b98c-7897ab89cba8-secret-metrics-client-certs\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.011066 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4ab45ac8-999f-4507-b98c-7897ab89cba8-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.011126 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4mgl\" (UniqueName: \"kubernetes.io/projected/4ab45ac8-999f-4507-b98c-7897ab89cba8-kube-api-access-g4mgl\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.011178 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ab45ac8-999f-4507-b98c-7897ab89cba8-client-ca-bundle\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.011252 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/4ab45ac8-999f-4507-b98c-7897ab89cba8-secret-metrics-client-certs\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.011297 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/4ab45ac8-999f-4507-b98c-7897ab89cba8-audit-log\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.011362 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-server-tls\" (UniqueName: 
\"kubernetes.io/secret/4ab45ac8-999f-4507-b98c-7897ab89cba8-secret-metrics-server-tls\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.011463 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/4ab45ac8-999f-4507-b98c-7897ab89cba8-metrics-server-audit-profiles\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.011855 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-log\" (UniqueName: \"kubernetes.io/empty-dir/4ab45ac8-999f-4507-b98c-7897ab89cba8-audit-log\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.012292 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4ab45ac8-999f-4507-b98c-7897ab89cba8-configmap-kubelet-serving-ca-bundle\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.012725 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-server-audit-profiles\" (UniqueName: \"kubernetes.io/configmap/4ab45ac8-999f-4507-b98c-7897ab89cba8-metrics-server-audit-profiles\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.017882 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/4ab45ac8-999f-4507-b98c-7897ab89cba8-secret-metrics-client-certs\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.019782 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ab45ac8-999f-4507-b98c-7897ab89cba8-client-ca-bundle\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.025812 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-server-tls\" (UniqueName: \"kubernetes.io/secret/4ab45ac8-999f-4507-b98c-7897ab89cba8-secret-metrics-server-tls\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.027480 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4mgl\" (UniqueName: \"kubernetes.io/projected/4ab45ac8-999f-4507-b98c-7897ab89cba8-kube-api-access-g4mgl\") pod \"metrics-server-847cb5969b-kjnfg\" (UID: \"4ab45ac8-999f-4507-b98c-7897ab89cba8\") " 
pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.243890 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.385048 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/monitoring-plugin-5477bd9d6b-cpp6n"] Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.387697 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/monitoring-plugin-5477bd9d6b-cpp6n" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.393303 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"default-dockercfg-6tstp" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.393961 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"monitoring-plugin-cert" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.399712 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-5477bd9d6b-cpp6n"] Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.418469 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/900592a1-d00a-4388-b1ed-53b720d0921b-monitoring-plugin-cert\") pod \"monitoring-plugin-5477bd9d6b-cpp6n\" (UID: \"900592a1-d00a-4388-b1ed-53b720d0921b\") " pod="openshift-monitoring/monitoring-plugin-5477bd9d6b-cpp6n" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.524465 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/900592a1-d00a-4388-b1ed-53b720d0921b-monitoring-plugin-cert\") pod \"monitoring-plugin-5477bd9d6b-cpp6n\" (UID: \"900592a1-d00a-4388-b1ed-53b720d0921b\") " pod="openshift-monitoring/monitoring-plugin-5477bd9d6b-cpp6n" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.532687 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"monitoring-plugin-cert\" (UniqueName: \"kubernetes.io/secret/900592a1-d00a-4388-b1ed-53b720d0921b-monitoring-plugin-cert\") pod \"monitoring-plugin-5477bd9d6b-cpp6n\" (UID: \"900592a1-d00a-4388-b1ed-53b720d0921b\") " pod="openshift-monitoring/monitoring-plugin-5477bd9d6b-cpp6n" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.689583 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/metrics-server-847cb5969b-kjnfg"] Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.725406 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-monitoring/monitoring-plugin-5477bd9d6b-cpp6n" Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.789098 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7466f98554-gfklj"] Nov 21 15:40:35 crc kubenswrapper[4967]: W1121 15:40:35.797780 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71c2ce0c_917f_413a_a66a_ea2eb1a18318.slice/crio-5864c73ade75a9cbb8c10883120707eda8bdc3b66dc748f034f8c2a0367bdf45 WatchSource:0}: Error finding container 5864c73ade75a9cbb8c10883120707eda8bdc3b66dc748f034f8c2a0367bdf45: Status 404 returned error can't find the container with id 5864c73ade75a9cbb8c10883120707eda8bdc3b66dc748f034f8c2a0367bdf45 Nov 21 15:40:35 crc kubenswrapper[4967]: I1121 15:40:35.960833 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/monitoring-plugin-5477bd9d6b-cpp6n"] Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.013160 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.015581 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.019623 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-sidecar-tls" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.024603 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-kube-rbac-proxy-web" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.028700 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-web-config" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.028731 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.028987 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-tls-assets-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.029005 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-thanos-prometheus-http-client-file" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.029116 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-dockercfg-lmwf2" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.029247 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"kube-rbac-proxy" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.029447 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"serving-certs-ca-bundle" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.029513 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.029451 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-monitoring"/"prometheus-k8s-grpc-tls-a5f0allcg958o" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.036248 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-k8s-rulefiles-0" Nov 21 15:40:36 crc 
kubenswrapper[4967]: I1121 15:40:36.037062 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-monitoring"/"prometheus-trusted-ca-bundle" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.065975 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.137951 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/814c6408-a67f-49ce-a646-7e122fde717f-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.138395 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.138425 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.138452 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.138481 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.138500 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.138518 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/814c6408-a67f-49ce-a646-7e122fde717f-config-out\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.138531 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: 
\"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.138554 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/814c6408-a67f-49ce-a646-7e122fde717f-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.138573 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.138590 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-web-config\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.138610 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/814c6408-a67f-49ce-a646-7e122fde717f-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.138633 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/814c6408-a67f-49ce-a646-7e122fde717f-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.138648 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/814c6408-a67f-49ce-a646-7e122fde717f-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.138673 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/814c6408-a67f-49ce-a646-7e122fde717f-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.138694 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krpd5\" (UniqueName: \"kubernetes.io/projected/814c6408-a67f-49ce-a646-7e122fde717f-kube-api-access-krpd5\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.138720 4967 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/814c6408-a67f-49ce-a646-7e122fde717f-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.138758 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-config\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.240636 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.242628 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.242681 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.242711 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/814c6408-a67f-49ce-a646-7e122fde717f-config-out\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.242738 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.242766 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/814c6408-a67f-49ce-a646-7e122fde717f-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.242797 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-web-config\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.242818 
4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.242848 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/814c6408-a67f-49ce-a646-7e122fde717f-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.242885 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/814c6408-a67f-49ce-a646-7e122fde717f-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.242903 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/814c6408-a67f-49ce-a646-7e122fde717f-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.242940 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/814c6408-a67f-49ce-a646-7e122fde717f-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.242985 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krpd5\" (UniqueName: \"kubernetes.io/projected/814c6408-a67f-49ce-a646-7e122fde717f-kube-api-access-krpd5\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.243016 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/814c6408-a67f-49ce-a646-7e122fde717f-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.243099 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-config\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.243149 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/814c6408-a67f-49ce-a646-7e122fde717f-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.243183 4967 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.243220 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.245053 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/814c6408-a67f-49ce-a646-7e122fde717f-prometheus-trusted-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.245344 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-db\" (UniqueName: \"kubernetes.io/empty-dir/814c6408-a67f-49ce-a646-7e122fde717f-prometheus-k8s-db\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.245918 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-metrics-client-ca\" (UniqueName: \"kubernetes.io/configmap/814c6408-a67f-49ce-a646-7e122fde717f-configmap-metrics-client-ca\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.246892 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-serving-certs-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/814c6408-a67f-49ce-a646-7e122fde717f-configmap-serving-certs-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.247768 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/814c6408-a67f-49ce-a646-7e122fde717f-config-out\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.248231 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-thanos-sidecar-tls\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-secret-prometheus-k8s-thanos-sidecar-tls\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.248558 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-metrics-client-certs\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-secret-metrics-client-certs\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.248668 4967 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-kube-rbac-proxy-web\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-secret-prometheus-k8s-kube-rbac-proxy-web\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.249078 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-web-config\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.249534 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/814c6408-a67f-49ce-a646-7e122fde717f-tls-assets\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.249821 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"configmap-kubelet-serving-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/814c6408-a67f-49ce-a646-7e122fde717f-configmap-kubelet-serving-ca-bundle\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.251719 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-k8s-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/814c6408-a67f-49ce-a646-7e122fde717f-prometheus-k8s-rulefiles-0\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.252285 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-prometheus-k8s-tls\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-secret-prometheus-k8s-tls\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.252810 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-config\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.253605 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-thanos-prometheus-http-client-file\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.257903 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-grpc-tls\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-secret-grpc-tls\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.265223 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krpd5\" (UniqueName: 
\"kubernetes.io/projected/814c6408-a67f-49ce-a646-7e122fde717f-kube-api-access-krpd5\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.266085 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-kube-rbac-proxy\" (UniqueName: \"kubernetes.io/secret/814c6408-a67f-49ce-a646-7e122fde717f-secret-kube-rbac-proxy\") pod \"prometheus-k8s-0\" (UID: \"814c6408-a67f-49ce-a646-7e122fde717f\") " pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.305080 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-5477bd9d6b-cpp6n" event={"ID":"900592a1-d00a-4388-b1ed-53b720d0921b","Type":"ContainerStarted","Data":"23914ea5a717d6b3c53b9f25e74d3f5e06c473d52bd49b21b945f86a87389a02"} Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.309362 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" event={"ID":"4ea00e49-25fa-40a7-af58-d5aa5a8b9301","Type":"ContainerStarted","Data":"30641c0a67b14d63abbea15397a49a127112f06a620822b71761677cc5d8309c"} Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.309407 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" event={"ID":"4ea00e49-25fa-40a7-af58-d5aa5a8b9301","Type":"ContainerStarted","Data":"577f856c7d2bca9c9f981eedbebff3ff8c693a78770bf3426d165b41dd3460bd"} Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.309422 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" event={"ID":"4ea00e49-25fa-40a7-af58-d5aa5a8b9301","Type":"ContainerStarted","Data":"7c5aabb7d982bdfdab3cc19d5bef356cdedfbe2b473133b30465822f8e8c9eff"} Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.311565 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7466f98554-gfklj" event={"ID":"71c2ce0c-917f-413a-a66a-ea2eb1a18318","Type":"ContainerStarted","Data":"71eccae0bf19881b09cbc3c86848cf8cb9d20cfaff0624b718ca13c7377f9914"} Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.311592 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7466f98554-gfklj" event={"ID":"71c2ce0c-917f-413a-a66a-ea2eb1a18318","Type":"ContainerStarted","Data":"5864c73ade75a9cbb8c10883120707eda8bdc3b66dc748f034f8c2a0367bdf45"} Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.313752 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" event={"ID":"4ab45ac8-999f-4507-b98c-7897ab89cba8","Type":"ContainerStarted","Data":"742a64a1281ff0275d526f02b8c85dc27015625424780b04665c85ed48a47860"} Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.335766 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-7466f98554-gfklj" podStartSLOduration=2.335741632 podStartE2EDuration="2.335741632s" podCreationTimestamp="2025-11-21 15:40:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:40:36.328350784 +0000 UTC m=+324.586871822" watchObservedRunningTime="2025-11-21 15:40:36.335741632 +0000 UTC m=+324.594262640" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.390890 4967 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:36 crc kubenswrapper[4967]: I1121 15:40:36.643168 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-monitoring/prometheus-k8s-0"] Nov 21 15:40:37 crc kubenswrapper[4967]: I1121 15:40:37.322025 4967 generic.go:334] "Generic (PLEG): container finished" podID="814c6408-a67f-49ce-a646-7e122fde717f" containerID="0df3e5f4d41b9f84e07393fbdf437687a2efee724021b7cdf4d981446df3587b" exitCode=0 Nov 21 15:40:37 crc kubenswrapper[4967]: I1121 15:40:37.322115 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"814c6408-a67f-49ce-a646-7e122fde717f","Type":"ContainerDied","Data":"0df3e5f4d41b9f84e07393fbdf437687a2efee724021b7cdf4d981446df3587b"} Nov 21 15:40:37 crc kubenswrapper[4967]: I1121 15:40:37.322413 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"814c6408-a67f-49ce-a646-7e122fde717f","Type":"ContainerStarted","Data":"11339d86743695771b4ef7a7305f0a08d64644eee28ecec32a43f06414f9df68"} Nov 21 15:40:39 crc kubenswrapper[4967]: I1121 15:40:39.338283 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" event={"ID":"4ea00e49-25fa-40a7-af58-d5aa5a8b9301","Type":"ContainerStarted","Data":"a1328ea7be28a84ee9a5c5aff2590ed520695cda9efa75966b91be0a6c48d566"} Nov 21 15:40:39 crc kubenswrapper[4967]: I1121 15:40:39.338711 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:39 crc kubenswrapper[4967]: I1121 15:40:39.338726 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" event={"ID":"4ea00e49-25fa-40a7-af58-d5aa5a8b9301","Type":"ContainerStarted","Data":"51f22e6f5c60e1d41c605f234ca688f6ebc5846d92211b3e90e0431cab15e2f0"} Nov 21 15:40:39 crc kubenswrapper[4967]: I1121 15:40:39.338739 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" event={"ID":"4ea00e49-25fa-40a7-af58-d5aa5a8b9301","Type":"ContainerStarted","Data":"e7c8b3b962831f999a76e435127fd40336046488e595fba004582a81f3738724"} Nov 21 15:40:39 crc kubenswrapper[4967]: I1121 15:40:39.346109 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"4aaa3781-4245-4156-afb2-f2756e0b2c4c","Type":"ContainerStarted","Data":"79b40cd12da0bf1949e8a2e097c8d5cb99ef7d154d05313a9aa1dcca7da8c6da"} Nov 21 15:40:39 crc kubenswrapper[4967]: I1121 15:40:39.346216 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"4aaa3781-4245-4156-afb2-f2756e0b2c4c","Type":"ContainerStarted","Data":"2ede8958e76f8b2e7f035036123ed5c76227888924c2b8c38065889da0ac7277"} Nov 21 15:40:39 crc kubenswrapper[4967]: I1121 15:40:39.346253 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"4aaa3781-4245-4156-afb2-f2756e0b2c4c","Type":"ContainerStarted","Data":"de823de9d9fa2de00f71a2ed353df3b55b0a85a920b30bb59e751150f6aea32a"} Nov 21 15:40:39 crc kubenswrapper[4967]: I1121 15:40:39.346282 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" 
event={"ID":"4aaa3781-4245-4156-afb2-f2756e0b2c4c","Type":"ContainerStarted","Data":"833d4e5ddbe46f8d1c15d9ca4eca70e40023b3495f75e90daacb429c4dbf1ffd"} Nov 21 15:40:39 crc kubenswrapper[4967]: I1121 15:40:39.346341 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"4aaa3781-4245-4156-afb2-f2756e0b2c4c","Type":"ContainerStarted","Data":"83b6c371dc07746a071b2816e9920bccd7bbc753f33aa1e696d5dc97269ef778"} Nov 21 15:40:39 crc kubenswrapper[4967]: I1121 15:40:39.346353 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/alertmanager-main-0" event={"ID":"4aaa3781-4245-4156-afb2-f2756e0b2c4c","Type":"ContainerStarted","Data":"51e19586c822fa9e83791a7f240a9d4c813bffed0c70510f0ecc996c991ab295"} Nov 21 15:40:39 crc kubenswrapper[4967]: I1121 15:40:39.348626 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" event={"ID":"4ab45ac8-999f-4507-b98c-7897ab89cba8","Type":"ContainerStarted","Data":"9eb96cedd1ec492a9deb7054692d5e11ae279123181eb19930be1162e0370f56"} Nov 21 15:40:39 crc kubenswrapper[4967]: I1121 15:40:39.350750 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/monitoring-plugin-5477bd9d6b-cpp6n" event={"ID":"900592a1-d00a-4388-b1ed-53b720d0921b","Type":"ContainerStarted","Data":"305d926541761a3a8004bda2c4643845de01281954f4f53c147121799b261425"} Nov 21 15:40:39 crc kubenswrapper[4967]: I1121 15:40:39.351039 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/monitoring-plugin-5477bd9d6b-cpp6n" Nov 21 15:40:39 crc kubenswrapper[4967]: I1121 15:40:39.356677 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/monitoring-plugin-5477bd9d6b-cpp6n" Nov 21 15:40:39 crc kubenswrapper[4967]: I1121 15:40:39.373566 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" podStartSLOduration=2.339075728 podStartE2EDuration="8.373543749s" podCreationTimestamp="2025-11-21 15:40:31 +0000 UTC" firstStartedPulling="2025-11-21 15:40:32.271600025 +0000 UTC m=+320.530121033" lastFinishedPulling="2025-11-21 15:40:38.306068046 +0000 UTC m=+326.564589054" observedRunningTime="2025-11-21 15:40:39.365795752 +0000 UTC m=+327.624316770" watchObservedRunningTime="2025-11-21 15:40:39.373543749 +0000 UTC m=+327.632064757" Nov 21 15:40:39 crc kubenswrapper[4967]: I1121 15:40:39.389226 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/monitoring-plugin-5477bd9d6b-cpp6n" podStartSLOduration=2.139822163 podStartE2EDuration="4.389202827s" podCreationTimestamp="2025-11-21 15:40:35 +0000 UTC" firstStartedPulling="2025-11-21 15:40:35.977190907 +0000 UTC m=+324.235711915" lastFinishedPulling="2025-11-21 15:40:38.226571571 +0000 UTC m=+326.485092579" observedRunningTime="2025-11-21 15:40:39.387239727 +0000 UTC m=+327.645760755" watchObservedRunningTime="2025-11-21 15:40:39.389202827 +0000 UTC m=+327.647723835" Nov 21 15:40:39 crc kubenswrapper[4967]: I1121 15:40:39.414992 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/alertmanager-main-0" podStartSLOduration=2.675458174 podStartE2EDuration="9.414970133s" podCreationTimestamp="2025-11-21 15:40:30 +0000 UTC" firstStartedPulling="2025-11-21 15:40:31.485650089 +0000 UTC m=+319.744171097" lastFinishedPulling="2025-11-21 15:40:38.225162048 +0000 
UTC m=+326.483683056" observedRunningTime="2025-11-21 15:40:39.41325419 +0000 UTC m=+327.671775198" watchObservedRunningTime="2025-11-21 15:40:39.414970133 +0000 UTC m=+327.673491141" Nov 21 15:40:39 crc kubenswrapper[4967]: I1121 15:40:39.448832 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" podStartSLOduration=2.910127284 podStartE2EDuration="5.448803525s" podCreationTimestamp="2025-11-21 15:40:34 +0000 UTC" firstStartedPulling="2025-11-21 15:40:35.688008074 +0000 UTC m=+323.946529082" lastFinishedPulling="2025-11-21 15:40:38.226684315 +0000 UTC m=+326.485205323" observedRunningTime="2025-11-21 15:40:39.444761701 +0000 UTC m=+327.703282719" watchObservedRunningTime="2025-11-21 15:40:39.448803525 +0000 UTC m=+327.707324533" Nov 21 15:40:40 crc kubenswrapper[4967]: I1121 15:40:40.326221 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-wwjmz" Nov 21 15:40:40 crc kubenswrapper[4967]: I1121 15:40:40.375756 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kls8w"] Nov 21 15:40:40 crc kubenswrapper[4967]: I1121 15:40:40.382872 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/thanos-querier-c9f77484d-pkskm" Nov 21 15:40:42 crc kubenswrapper[4967]: I1121 15:40:42.390223 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"814c6408-a67f-49ce-a646-7e122fde717f","Type":"ContainerStarted","Data":"bccb3f17c2cee83cd2b722fbda63fafd5cb267921aa3d27cfac8a95df488d9d9"} Nov 21 15:40:42 crc kubenswrapper[4967]: I1121 15:40:42.390560 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"814c6408-a67f-49ce-a646-7e122fde717f","Type":"ContainerStarted","Data":"d74b484c7f68080b7d2122e18a0bb8650a792c4ddb7f37f1c583285d7a96001c"} Nov 21 15:40:42 crc kubenswrapper[4967]: I1121 15:40:42.390572 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"814c6408-a67f-49ce-a646-7e122fde717f","Type":"ContainerStarted","Data":"104cb24ed004958e34f298209a2e64d0b63fcac2e8232bed4ce31fbc655217b9"} Nov 21 15:40:42 crc kubenswrapper[4967]: I1121 15:40:42.390581 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"814c6408-a67f-49ce-a646-7e122fde717f","Type":"ContainerStarted","Data":"fe29aae73d4510e4c909b15b651e0dcad75e655c79994972a0846a24219d6070"} Nov 21 15:40:42 crc kubenswrapper[4967]: I1121 15:40:42.390594 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"814c6408-a67f-49ce-a646-7e122fde717f","Type":"ContainerStarted","Data":"ecec62f9a4f5cbda97dc21ce199a0964d3aa1b6827d6bd7f4d983f070f596001"} Nov 21 15:40:42 crc kubenswrapper[4967]: I1121 15:40:42.390603 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-monitoring/prometheus-k8s-0" event={"ID":"814c6408-a67f-49ce-a646-7e122fde717f","Type":"ContainerStarted","Data":"669558d6d43b50481370fab4bda4dd53510f2b8466bbb8b486029f7b0ba94b73"} Nov 21 15:40:42 crc kubenswrapper[4967]: I1121 15:40:42.420175 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-monitoring/prometheus-k8s-0" podStartSLOduration=3.401504798 podStartE2EDuration="7.420155435s" 
podCreationTimestamp="2025-11-21 15:40:35 +0000 UTC" firstStartedPulling="2025-11-21 15:40:37.324415182 +0000 UTC m=+325.582936190" lastFinishedPulling="2025-11-21 15:40:41.343065819 +0000 UTC m=+329.601586827" observedRunningTime="2025-11-21 15:40:42.41573553 +0000 UTC m=+330.674256528" watchObservedRunningTime="2025-11-21 15:40:42.420155435 +0000 UTC m=+330.678676443" Nov 21 15:40:44 crc kubenswrapper[4967]: I1121 15:40:44.745879 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:44 crc kubenswrapper[4967]: I1121 15:40:44.746589 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:44 crc kubenswrapper[4967]: I1121 15:40:44.751002 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:45 crc kubenswrapper[4967]: I1121 15:40:45.417052 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:40:45 crc kubenswrapper[4967]: I1121 15:40:45.474856 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-m45jq"] Nov 21 15:40:46 crc kubenswrapper[4967]: I1121 15:40:46.391184 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:40:46 crc kubenswrapper[4967]: I1121 15:40:46.521611 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:40:46 crc kubenswrapper[4967]: I1121 15:40:46.521673 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:40:55 crc kubenswrapper[4967]: I1121 15:40:55.244545 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:40:55 crc kubenswrapper[4967]: I1121 15:40:55.245005 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:41:05 crc kubenswrapper[4967]: I1121 15:41:05.420593 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" podUID="274aead1-3e11-4349-99be-32e19bfe7d78" containerName="registry" containerID="cri-o://8f01ce99440ab93c52792e64906cd0c0910c3c8132d6c82e5f5b0715b867ba90" gracePeriod=30 Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.242138 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.339566 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/274aead1-3e11-4349-99be-32e19bfe7d78-ca-trust-extracted\") pod \"274aead1-3e11-4349-99be-32e19bfe7d78\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.339639 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82dlq\" (UniqueName: \"kubernetes.io/projected/274aead1-3e11-4349-99be-32e19bfe7d78-kube-api-access-82dlq\") pod \"274aead1-3e11-4349-99be-32e19bfe7d78\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.339690 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/274aead1-3e11-4349-99be-32e19bfe7d78-trusted-ca\") pod \"274aead1-3e11-4349-99be-32e19bfe7d78\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.339726 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/274aead1-3e11-4349-99be-32e19bfe7d78-registry-tls\") pod \"274aead1-3e11-4349-99be-32e19bfe7d78\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.339797 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/274aead1-3e11-4349-99be-32e19bfe7d78-installation-pull-secrets\") pod \"274aead1-3e11-4349-99be-32e19bfe7d78\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.339845 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/274aead1-3e11-4349-99be-32e19bfe7d78-registry-certificates\") pod \"274aead1-3e11-4349-99be-32e19bfe7d78\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.340024 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"274aead1-3e11-4349-99be-32e19bfe7d78\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.340055 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/274aead1-3e11-4349-99be-32e19bfe7d78-bound-sa-token\") pod \"274aead1-3e11-4349-99be-32e19bfe7d78\" (UID: \"274aead1-3e11-4349-99be-32e19bfe7d78\") " Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.340605 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/274aead1-3e11-4349-99be-32e19bfe7d78-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "274aead1-3e11-4349-99be-32e19bfe7d78" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.340894 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/274aead1-3e11-4349-99be-32e19bfe7d78-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "274aead1-3e11-4349-99be-32e19bfe7d78" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.346097 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/274aead1-3e11-4349-99be-32e19bfe7d78-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "274aead1-3e11-4349-99be-32e19bfe7d78" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.346411 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/274aead1-3e11-4349-99be-32e19bfe7d78-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "274aead1-3e11-4349-99be-32e19bfe7d78" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.347978 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/274aead1-3e11-4349-99be-32e19bfe7d78-kube-api-access-82dlq" (OuterVolumeSpecName: "kube-api-access-82dlq") pod "274aead1-3e11-4349-99be-32e19bfe7d78" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78"). InnerVolumeSpecName "kube-api-access-82dlq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.352870 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/274aead1-3e11-4349-99be-32e19bfe7d78-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "274aead1-3e11-4349-99be-32e19bfe7d78" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.353075 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "274aead1-3e11-4349-99be-32e19bfe7d78" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.361042 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/274aead1-3e11-4349-99be-32e19bfe7d78-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "274aead1-3e11-4349-99be-32e19bfe7d78" (UID: "274aead1-3e11-4349-99be-32e19bfe7d78"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.441351 4967 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/274aead1-3e11-4349-99be-32e19bfe7d78-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.441663 4967 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/274aead1-3e11-4349-99be-32e19bfe7d78-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.441692 4967 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/274aead1-3e11-4349-99be-32e19bfe7d78-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.441739 4967 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/274aead1-3e11-4349-99be-32e19bfe7d78-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.441750 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-82dlq\" (UniqueName: \"kubernetes.io/projected/274aead1-3e11-4349-99be-32e19bfe7d78-kube-api-access-82dlq\") on node \"crc\" DevicePath \"\"" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.441763 4967 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/274aead1-3e11-4349-99be-32e19bfe7d78-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.441773 4967 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/274aead1-3e11-4349-99be-32e19bfe7d78-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.545837 4967 generic.go:334] "Generic (PLEG): container finished" podID="274aead1-3e11-4349-99be-32e19bfe7d78" containerID="8f01ce99440ab93c52792e64906cd0c0910c3c8132d6c82e5f5b0715b867ba90" exitCode=0 Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.545921 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.548469 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" event={"ID":"274aead1-3e11-4349-99be-32e19bfe7d78","Type":"ContainerDied","Data":"8f01ce99440ab93c52792e64906cd0c0910c3c8132d6c82e5f5b0715b867ba90"} Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.548512 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kls8w" event={"ID":"274aead1-3e11-4349-99be-32e19bfe7d78","Type":"ContainerDied","Data":"d523f2e7b6d76f71bb15a8dd66084e5eb146a30047f1cb2f1d24ec6acd22b878"} Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.548533 4967 scope.go:117] "RemoveContainer" containerID="8f01ce99440ab93c52792e64906cd0c0910c3c8132d6c82e5f5b0715b867ba90" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.566998 4967 scope.go:117] "RemoveContainer" containerID="8f01ce99440ab93c52792e64906cd0c0910c3c8132d6c82e5f5b0715b867ba90" Nov 21 15:41:06 crc kubenswrapper[4967]: E1121 15:41:06.567603 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f01ce99440ab93c52792e64906cd0c0910c3c8132d6c82e5f5b0715b867ba90\": container with ID starting with 8f01ce99440ab93c52792e64906cd0c0910c3c8132d6c82e5f5b0715b867ba90 not found: ID does not exist" containerID="8f01ce99440ab93c52792e64906cd0c0910c3c8132d6c82e5f5b0715b867ba90" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.567646 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f01ce99440ab93c52792e64906cd0c0910c3c8132d6c82e5f5b0715b867ba90"} err="failed to get container status \"8f01ce99440ab93c52792e64906cd0c0910c3c8132d6c82e5f5b0715b867ba90\": rpc error: code = NotFound desc = could not find container \"8f01ce99440ab93c52792e64906cd0c0910c3c8132d6c82e5f5b0715b867ba90\": container with ID starting with 8f01ce99440ab93c52792e64906cd0c0910c3c8132d6c82e5f5b0715b867ba90 not found: ID does not exist" Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.589719 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kls8w"] Nov 21 15:41:06 crc kubenswrapper[4967]: I1121 15:41:06.594236 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kls8w"] Nov 21 15:41:08 crc kubenswrapper[4967]: I1121 15:41:08.543372 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="274aead1-3e11-4349-99be-32e19bfe7d78" path="/var/lib/kubelet/pods/274aead1-3e11-4349-99be-32e19bfe7d78/volumes" Nov 21 15:41:10 crc kubenswrapper[4967]: I1121 15:41:10.521002 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-m45jq" podUID="daf11197-7c7a-4a0e-8c7d-de8047b53fe7" containerName="console" containerID="cri-o://4b144cab1441f8f6651534ddc94421be9082e28042f5e86123653ffc8bf8cc82" gracePeriod=15 Nov 21 15:41:10 crc kubenswrapper[4967]: I1121 15:41:10.910499 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-m45jq_daf11197-7c7a-4a0e-8c7d-de8047b53fe7/console/0.log" Nov 21 15:41:10 crc kubenswrapper[4967]: I1121 15:41:10.911012 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.006053 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-console-serving-cert\") pod \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.006136 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-oauth-serving-cert\") pod \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.006206 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-trusted-ca-bundle\") pod \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.006234 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-service-ca\") pod \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.006280 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-console-config\") pod \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.006352 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-console-oauth-config\") pod \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.006405 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwjq7\" (UniqueName: \"kubernetes.io/projected/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-kube-api-access-xwjq7\") pod \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\" (UID: \"daf11197-7c7a-4a0e-8c7d-de8047b53fe7\") " Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.007085 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-console-config" (OuterVolumeSpecName: "console-config") pod "daf11197-7c7a-4a0e-8c7d-de8047b53fe7" (UID: "daf11197-7c7a-4a0e-8c7d-de8047b53fe7"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.007108 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "daf11197-7c7a-4a0e-8c7d-de8047b53fe7" (UID: "daf11197-7c7a-4a0e-8c7d-de8047b53fe7"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.007096 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-service-ca" (OuterVolumeSpecName: "service-ca") pod "daf11197-7c7a-4a0e-8c7d-de8047b53fe7" (UID: "daf11197-7c7a-4a0e-8c7d-de8047b53fe7"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.007554 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "daf11197-7c7a-4a0e-8c7d-de8047b53fe7" (UID: "daf11197-7c7a-4a0e-8c7d-de8047b53fe7"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.013023 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-kube-api-access-xwjq7" (OuterVolumeSpecName: "kube-api-access-xwjq7") pod "daf11197-7c7a-4a0e-8c7d-de8047b53fe7" (UID: "daf11197-7c7a-4a0e-8c7d-de8047b53fe7"). InnerVolumeSpecName "kube-api-access-xwjq7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.016505 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "daf11197-7c7a-4a0e-8c7d-de8047b53fe7" (UID: "daf11197-7c7a-4a0e-8c7d-de8047b53fe7"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.023643 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "daf11197-7c7a-4a0e-8c7d-de8047b53fe7" (UID: "daf11197-7c7a-4a0e-8c7d-de8047b53fe7"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.107233 4967 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.107265 4967 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-console-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.107275 4967 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.107284 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwjq7\" (UniqueName: \"kubernetes.io/projected/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-kube-api-access-xwjq7\") on node \"crc\" DevicePath \"\"" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.107292 4967 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.107303 4967 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.107315 4967 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/daf11197-7c7a-4a0e-8c7d-de8047b53fe7-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.574162 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-m45jq_daf11197-7c7a-4a0e-8c7d-de8047b53fe7/console/0.log" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.574220 4967 generic.go:334] "Generic (PLEG): container finished" podID="daf11197-7c7a-4a0e-8c7d-de8047b53fe7" containerID="4b144cab1441f8f6651534ddc94421be9082e28042f5e86123653ffc8bf8cc82" exitCode=2 Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.574256 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m45jq" event={"ID":"daf11197-7c7a-4a0e-8c7d-de8047b53fe7","Type":"ContainerDied","Data":"4b144cab1441f8f6651534ddc94421be9082e28042f5e86123653ffc8bf8cc82"} Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.574299 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m45jq" event={"ID":"daf11197-7c7a-4a0e-8c7d-de8047b53fe7","Type":"ContainerDied","Data":"f2668dfc7794650b5f8d3135c4bd20d6eadf4571c225176addad9333497be53e"} Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.574302 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-m45jq" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.574354 4967 scope.go:117] "RemoveContainer" containerID="4b144cab1441f8f6651534ddc94421be9082e28042f5e86123653ffc8bf8cc82" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.591751 4967 scope.go:117] "RemoveContainer" containerID="4b144cab1441f8f6651534ddc94421be9082e28042f5e86123653ffc8bf8cc82" Nov 21 15:41:11 crc kubenswrapper[4967]: E1121 15:41:11.593380 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b144cab1441f8f6651534ddc94421be9082e28042f5e86123653ffc8bf8cc82\": container with ID starting with 4b144cab1441f8f6651534ddc94421be9082e28042f5e86123653ffc8bf8cc82 not found: ID does not exist" containerID="4b144cab1441f8f6651534ddc94421be9082e28042f5e86123653ffc8bf8cc82" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.593454 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b144cab1441f8f6651534ddc94421be9082e28042f5e86123653ffc8bf8cc82"} err="failed to get container status \"4b144cab1441f8f6651534ddc94421be9082e28042f5e86123653ffc8bf8cc82\": rpc error: code = NotFound desc = could not find container \"4b144cab1441f8f6651534ddc94421be9082e28042f5e86123653ffc8bf8cc82\": container with ID starting with 4b144cab1441f8f6651534ddc94421be9082e28042f5e86123653ffc8bf8cc82 not found: ID does not exist" Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.609422 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-m45jq"] Nov 21 15:41:11 crc kubenswrapper[4967]: I1121 15:41:11.612944 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-m45jq"] Nov 21 15:41:12 crc kubenswrapper[4967]: I1121 15:41:12.544147 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="daf11197-7c7a-4a0e-8c7d-de8047b53fe7" path="/var/lib/kubelet/pods/daf11197-7c7a-4a0e-8c7d-de8047b53fe7/volumes" Nov 21 15:41:15 crc kubenswrapper[4967]: I1121 15:41:15.249098 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:41:15 crc kubenswrapper[4967]: I1121 15:41:15.254365 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/metrics-server-847cb5969b-kjnfg" Nov 21 15:41:16 crc kubenswrapper[4967]: I1121 15:41:16.521611 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:41:16 crc kubenswrapper[4967]: I1121 15:41:16.521920 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:41:36 crc kubenswrapper[4967]: I1121 15:41:36.391600 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:41:36 crc kubenswrapper[4967]: I1121 15:41:36.418829 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:41:36 crc kubenswrapper[4967]: I1121 15:41:36.777128 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-monitoring/prometheus-k8s-0" Nov 21 15:41:46 crc kubenswrapper[4967]: I1121 15:41:46.521927 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:41:46 crc kubenswrapper[4967]: I1121 15:41:46.522479 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:41:46 crc kubenswrapper[4967]: I1121 15:41:46.522528 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 15:41:46 crc kubenswrapper[4967]: I1121 15:41:46.523033 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"77fede66988dd8e2022052388678be1ae75dcef265f91ac9300614230678fc4b"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 15:41:46 crc kubenswrapper[4967]: I1121 15:41:46.523074 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://77fede66988dd8e2022052388678be1ae75dcef265f91ac9300614230678fc4b" gracePeriod=600 Nov 21 15:41:47 crc kubenswrapper[4967]: I1121 15:41:47.806632 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="77fede66988dd8e2022052388678be1ae75dcef265f91ac9300614230678fc4b" exitCode=0 Nov 21 15:41:47 crc kubenswrapper[4967]: I1121 15:41:47.806715 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"77fede66988dd8e2022052388678be1ae75dcef265f91ac9300614230678fc4b"} Nov 21 15:41:47 crc kubenswrapper[4967]: I1121 15:41:47.807011 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"02dde30aac8f6d27fad1d64a7cac03cf4e5c604ca91f456020aea82d85f77a2a"} Nov 21 15:41:47 crc kubenswrapper[4967]: I1121 15:41:47.807039 4967 scope.go:117] "RemoveContainer" containerID="dee586ec31296dbd769b1780bc1cb37b8c54763d6356956b8cedf96a02a08d0c" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.811637 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-84b549b7f9-mhv2t"] Nov 21 15:41:57 crc kubenswrapper[4967]: E1121 15:41:57.813220 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="274aead1-3e11-4349-99be-32e19bfe7d78" containerName="registry" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.813290 4967 
state_mem.go:107] "Deleted CPUSet assignment" podUID="274aead1-3e11-4349-99be-32e19bfe7d78" containerName="registry" Nov 21 15:41:57 crc kubenswrapper[4967]: E1121 15:41:57.813380 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daf11197-7c7a-4a0e-8c7d-de8047b53fe7" containerName="console" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.813504 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="daf11197-7c7a-4a0e-8c7d-de8047b53fe7" containerName="console" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.813683 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="274aead1-3e11-4349-99be-32e19bfe7d78" containerName="registry" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.813766 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="daf11197-7c7a-4a0e-8c7d-de8047b53fe7" containerName="console" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.814445 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.831605 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-84b549b7f9-mhv2t"] Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.885303 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-oauth-serving-cert\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.885396 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-console-config\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.885438 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a588e30a-43fb-4d78-a420-db178082c222-console-oauth-config\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.885477 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-service-ca\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.885505 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7d8xd\" (UniqueName: \"kubernetes.io/projected/a588e30a-43fb-4d78-a420-db178082c222-kube-api-access-7d8xd\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.885538 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/a588e30a-43fb-4d78-a420-db178082c222-console-serving-cert\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.885564 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-trusted-ca-bundle\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.986809 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-console-config\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.986886 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a588e30a-43fb-4d78-a420-db178082c222-console-oauth-config\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.986914 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-service-ca\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.986944 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7d8xd\" (UniqueName: \"kubernetes.io/projected/a588e30a-43fb-4d78-a420-db178082c222-kube-api-access-7d8xd\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.986969 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a588e30a-43fb-4d78-a420-db178082c222-console-serving-cert\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.986989 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-trusted-ca-bundle\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.987033 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-oauth-serving-cert\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.988080 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" 
(UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-console-config\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.988173 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-oauth-serving-cert\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.990702 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-trusted-ca-bundle\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.990784 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-service-ca\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.994285 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a588e30a-43fb-4d78-a420-db178082c222-console-oauth-config\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:57 crc kubenswrapper[4967]: I1121 15:41:57.998048 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a588e30a-43fb-4d78-a420-db178082c222-console-serving-cert\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:58 crc kubenswrapper[4967]: I1121 15:41:58.007436 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7d8xd\" (UniqueName: \"kubernetes.io/projected/a588e30a-43fb-4d78-a420-db178082c222-kube-api-access-7d8xd\") pod \"console-84b549b7f9-mhv2t\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:58 crc kubenswrapper[4967]: I1121 15:41:58.130563 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:41:58 crc kubenswrapper[4967]: I1121 15:41:58.329562 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-84b549b7f9-mhv2t"] Nov 21 15:41:58 crc kubenswrapper[4967]: I1121 15:41:58.887622 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-84b549b7f9-mhv2t" event={"ID":"a588e30a-43fb-4d78-a420-db178082c222","Type":"ContainerStarted","Data":"10cfbf5a9c015ee5803123b0a9c47b2b1de39905e00f0fb3d8ad10659dca124d"} Nov 21 15:41:59 crc kubenswrapper[4967]: I1121 15:41:59.894147 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-84b549b7f9-mhv2t" event={"ID":"a588e30a-43fb-4d78-a420-db178082c222","Type":"ContainerStarted","Data":"0c1b83cc32bbddde1646f421dd810d6bd5ad869aeca4d33be5ebc478374b4787"} Nov 21 15:41:59 crc kubenswrapper[4967]: I1121 15:41:59.912452 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-84b549b7f9-mhv2t" podStartSLOduration=2.9124332280000003 podStartE2EDuration="2.912433228s" podCreationTimestamp="2025-11-21 15:41:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:41:59.909805133 +0000 UTC m=+408.168326151" watchObservedRunningTime="2025-11-21 15:41:59.912433228 +0000 UTC m=+408.170954236" Nov 21 15:42:08 crc kubenswrapper[4967]: I1121 15:42:08.131176 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:42:08 crc kubenswrapper[4967]: I1121 15:42:08.131759 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:42:08 crc kubenswrapper[4967]: I1121 15:42:08.135819 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:42:08 crc kubenswrapper[4967]: I1121 15:42:08.953755 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:42:09 crc kubenswrapper[4967]: I1121 15:42:09.066180 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-7466f98554-gfklj"] Nov 21 15:42:34 crc kubenswrapper[4967]: I1121 15:42:34.107716 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-7466f98554-gfklj" podUID="71c2ce0c-917f-413a-a66a-ea2eb1a18318" containerName="console" containerID="cri-o://71eccae0bf19881b09cbc3c86848cf8cb9d20cfaff0624b718ca13c7377f9914" gracePeriod=15 Nov 21 15:42:34 crc kubenswrapper[4967]: I1121 15:42:34.747249 4967 patch_prober.go:28] interesting pod/console-7466f98554-gfklj container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.69:8443/health\": dial tcp 10.217.0.69:8443: connect: connection refused" start-of-body= Nov 21 15:42:34 crc kubenswrapper[4967]: I1121 15:42:34.747361 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-7466f98554-gfklj" podUID="71c2ce0c-917f-413a-a66a-ea2eb1a18318" containerName="console" probeResult="failure" output="Get \"https://10.217.0.69:8443/health\": dial tcp 10.217.0.69:8443: connect: connection refused" Nov 21 15:42:34 crc kubenswrapper[4967]: I1121 15:42:34.963867 4967 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-console_console-7466f98554-gfklj_71c2ce0c-917f-413a-a66a-ea2eb1a18318/console/0.log" Nov 21 15:42:34 crc kubenswrapper[4967]: I1121 15:42:34.964229 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.110190 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-7466f98554-gfklj_71c2ce0c-917f-413a-a66a-ea2eb1a18318/console/0.log" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.110241 4967 generic.go:334] "Generic (PLEG): container finished" podID="71c2ce0c-917f-413a-a66a-ea2eb1a18318" containerID="71eccae0bf19881b09cbc3c86848cf8cb9d20cfaff0624b718ca13c7377f9914" exitCode=2 Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.110268 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7466f98554-gfklj" event={"ID":"71c2ce0c-917f-413a-a66a-ea2eb1a18318","Type":"ContainerDied","Data":"71eccae0bf19881b09cbc3c86848cf8cb9d20cfaff0624b718ca13c7377f9914"} Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.110298 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7466f98554-gfklj" event={"ID":"71c2ce0c-917f-413a-a66a-ea2eb1a18318","Type":"ContainerDied","Data":"5864c73ade75a9cbb8c10883120707eda8bdc3b66dc748f034f8c2a0367bdf45"} Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.110341 4967 scope.go:117] "RemoveContainer" containerID="71eccae0bf19881b09cbc3c86848cf8cb9d20cfaff0624b718ca13c7377f9914" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.110348 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7466f98554-gfklj" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.116869 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/71c2ce0c-917f-413a-a66a-ea2eb1a18318-console-serving-cert\") pod \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.116972 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-trusted-ca-bundle\") pod \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.117028 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-oauth-serving-cert\") pod \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.117069 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-console-config\") pod \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.117103 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ffc5\" (UniqueName: \"kubernetes.io/projected/71c2ce0c-917f-413a-a66a-ea2eb1a18318-kube-api-access-4ffc5\") pod \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\" (UID: 
\"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.117129 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/71c2ce0c-917f-413a-a66a-ea2eb1a18318-console-oauth-config\") pod \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.117160 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-service-ca\") pod \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\" (UID: \"71c2ce0c-917f-413a-a66a-ea2eb1a18318\") " Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.117794 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "71c2ce0c-917f-413a-a66a-ea2eb1a18318" (UID: "71c2ce0c-917f-413a-a66a-ea2eb1a18318"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.117837 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-service-ca" (OuterVolumeSpecName: "service-ca") pod "71c2ce0c-917f-413a-a66a-ea2eb1a18318" (UID: "71c2ce0c-917f-413a-a66a-ea2eb1a18318"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.118484 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-console-config" (OuterVolumeSpecName: "console-config") pod "71c2ce0c-917f-413a-a66a-ea2eb1a18318" (UID: "71c2ce0c-917f-413a-a66a-ea2eb1a18318"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.118538 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "71c2ce0c-917f-413a-a66a-ea2eb1a18318" (UID: "71c2ce0c-917f-413a-a66a-ea2eb1a18318"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.123103 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71c2ce0c-917f-413a-a66a-ea2eb1a18318-kube-api-access-4ffc5" (OuterVolumeSpecName: "kube-api-access-4ffc5") pod "71c2ce0c-917f-413a-a66a-ea2eb1a18318" (UID: "71c2ce0c-917f-413a-a66a-ea2eb1a18318"). InnerVolumeSpecName "kube-api-access-4ffc5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.123054 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71c2ce0c-917f-413a-a66a-ea2eb1a18318-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "71c2ce0c-917f-413a-a66a-ea2eb1a18318" (UID: "71c2ce0c-917f-413a-a66a-ea2eb1a18318"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.130223 4967 scope.go:117] "RemoveContainer" containerID="71eccae0bf19881b09cbc3c86848cf8cb9d20cfaff0624b718ca13c7377f9914" Nov 21 15:42:35 crc kubenswrapper[4967]: E1121 15:42:35.130784 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71eccae0bf19881b09cbc3c86848cf8cb9d20cfaff0624b718ca13c7377f9914\": container with ID starting with 71eccae0bf19881b09cbc3c86848cf8cb9d20cfaff0624b718ca13c7377f9914 not found: ID does not exist" containerID="71eccae0bf19881b09cbc3c86848cf8cb9d20cfaff0624b718ca13c7377f9914" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.130828 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71eccae0bf19881b09cbc3c86848cf8cb9d20cfaff0624b718ca13c7377f9914"} err="failed to get container status \"71eccae0bf19881b09cbc3c86848cf8cb9d20cfaff0624b718ca13c7377f9914\": rpc error: code = NotFound desc = could not find container \"71eccae0bf19881b09cbc3c86848cf8cb9d20cfaff0624b718ca13c7377f9914\": container with ID starting with 71eccae0bf19881b09cbc3c86848cf8cb9d20cfaff0624b718ca13c7377f9914 not found: ID does not exist" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.132852 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71c2ce0c-917f-413a-a66a-ea2eb1a18318-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "71c2ce0c-917f-413a-a66a-ea2eb1a18318" (UID: "71c2ce0c-917f-413a-a66a-ea2eb1a18318"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.218753 4967 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.218801 4967 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-console-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.218811 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ffc5\" (UniqueName: \"kubernetes.io/projected/71c2ce0c-917f-413a-a66a-ea2eb1a18318-kube-api-access-4ffc5\") on node \"crc\" DevicePath \"\"" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.218824 4967 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/71c2ce0c-917f-413a-a66a-ea2eb1a18318-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.218834 4967 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.218842 4967 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/71c2ce0c-917f-413a-a66a-ea2eb1a18318-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.218850 4967 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/71c2ce0c-917f-413a-a66a-ea2eb1a18318-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.443970 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-7466f98554-gfklj"] Nov 21 15:42:35 crc kubenswrapper[4967]: I1121 15:42:35.447028 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-7466f98554-gfklj"] Nov 21 15:42:36 crc kubenswrapper[4967]: I1121 15:42:36.551304 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71c2ce0c-917f-413a-a66a-ea2eb1a18318" path="/var/lib/kubelet/pods/71c2ce0c-917f-413a-a66a-ea2eb1a18318/volumes" Nov 21 15:44:16 crc kubenswrapper[4967]: I1121 15:44:16.522711 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:44:16 crc kubenswrapper[4967]: I1121 15:44:16.523359 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:44:46 crc kubenswrapper[4967]: I1121 15:44:46.521729 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:44:46 crc kubenswrapper[4967]: I1121 15:44:46.522265 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:44:56 crc kubenswrapper[4967]: I1121 15:44:56.856654 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg"] Nov 21 15:44:56 crc kubenswrapper[4967]: E1121 15:44:56.858027 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71c2ce0c-917f-413a-a66a-ea2eb1a18318" containerName="console" Nov 21 15:44:56 crc kubenswrapper[4967]: I1121 15:44:56.858059 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="71c2ce0c-917f-413a-a66a-ea2eb1a18318" containerName="console" Nov 21 15:44:56 crc kubenswrapper[4967]: I1121 15:44:56.858247 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="71c2ce0c-917f-413a-a66a-ea2eb1a18318" containerName="console" Nov 21 15:44:56 crc kubenswrapper[4967]: I1121 15:44:56.859474 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg" Nov 21 15:44:56 crc kubenswrapper[4967]: I1121 15:44:56.861894 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 21 15:44:56 crc kubenswrapper[4967]: I1121 15:44:56.873509 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg"] Nov 21 15:44:56 crc kubenswrapper[4967]: I1121 15:44:56.939281 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4lvlk\" (UniqueName: \"kubernetes.io/projected/3989e919-932b-4153-98cf-5f1ebcc40f89-kube-api-access-4lvlk\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg\" (UID: \"3989e919-932b-4153-98cf-5f1ebcc40f89\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg" Nov 21 15:44:56 crc kubenswrapper[4967]: I1121 15:44:56.939401 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3989e919-932b-4153-98cf-5f1ebcc40f89-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg\" (UID: \"3989e919-932b-4153-98cf-5f1ebcc40f89\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg" Nov 21 15:44:56 crc kubenswrapper[4967]: I1121 15:44:56.939445 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3989e919-932b-4153-98cf-5f1ebcc40f89-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg\" (UID: \"3989e919-932b-4153-98cf-5f1ebcc40f89\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg" Nov 21 15:44:57 crc kubenswrapper[4967]: I1121 15:44:57.040676 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3989e919-932b-4153-98cf-5f1ebcc40f89-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg\" (UID: \"3989e919-932b-4153-98cf-5f1ebcc40f89\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg" Nov 21 15:44:57 crc kubenswrapper[4967]: I1121 15:44:57.040760 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3989e919-932b-4153-98cf-5f1ebcc40f89-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg\" (UID: \"3989e919-932b-4153-98cf-5f1ebcc40f89\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg" Nov 21 15:44:57 crc kubenswrapper[4967]: I1121 15:44:57.040836 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4lvlk\" (UniqueName: \"kubernetes.io/projected/3989e919-932b-4153-98cf-5f1ebcc40f89-kube-api-access-4lvlk\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg\" (UID: \"3989e919-932b-4153-98cf-5f1ebcc40f89\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg" Nov 21 15:44:57 crc kubenswrapper[4967]: I1121 15:44:57.041338 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/3989e919-932b-4153-98cf-5f1ebcc40f89-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg\" (UID: \"3989e919-932b-4153-98cf-5f1ebcc40f89\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg" Nov 21 15:44:57 crc kubenswrapper[4967]: I1121 15:44:57.041575 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3989e919-932b-4153-98cf-5f1ebcc40f89-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg\" (UID: \"3989e919-932b-4153-98cf-5f1ebcc40f89\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg" Nov 21 15:44:57 crc kubenswrapper[4967]: I1121 15:44:57.076230 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4lvlk\" (UniqueName: \"kubernetes.io/projected/3989e919-932b-4153-98cf-5f1ebcc40f89-kube-api-access-4lvlk\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg\" (UID: \"3989e919-932b-4153-98cf-5f1ebcc40f89\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg" Nov 21 15:44:57 crc kubenswrapper[4967]: I1121 15:44:57.179245 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg" Nov 21 15:44:57 crc kubenswrapper[4967]: I1121 15:44:57.393172 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg"] Nov 21 15:44:58 crc kubenswrapper[4967]: I1121 15:44:58.083050 4967 generic.go:334] "Generic (PLEG): container finished" podID="3989e919-932b-4153-98cf-5f1ebcc40f89" containerID="4378b9e6b19405d0d4a162ae28adda4eef8905fe98c8134f75f4993eff988629" exitCode=0 Nov 21 15:44:58 crc kubenswrapper[4967]: I1121 15:44:58.083119 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg" event={"ID":"3989e919-932b-4153-98cf-5f1ebcc40f89","Type":"ContainerDied","Data":"4378b9e6b19405d0d4a162ae28adda4eef8905fe98c8134f75f4993eff988629"} Nov 21 15:44:58 crc kubenswrapper[4967]: I1121 15:44:58.083358 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg" event={"ID":"3989e919-932b-4153-98cf-5f1ebcc40f89","Type":"ContainerStarted","Data":"34ed56eeb70e893e1bb6b1acce50a2eff9891fbe5f9655f444f0bb0d70edcfe5"} Nov 21 15:44:58 crc kubenswrapper[4967]: I1121 15:44:58.085197 4967 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 15:45:00 crc kubenswrapper[4967]: I1121 15:45:00.096064 4967 generic.go:334] "Generic (PLEG): container finished" podID="3989e919-932b-4153-98cf-5f1ebcc40f89" containerID="fd0d3a19fc2dfc3186f0bb63246587cf8db8c27de286335fed0bd37cdbee5eb0" exitCode=0 Nov 21 15:45:00 crc kubenswrapper[4967]: I1121 15:45:00.096254 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg" event={"ID":"3989e919-932b-4153-98cf-5f1ebcc40f89","Type":"ContainerDied","Data":"fd0d3a19fc2dfc3186f0bb63246587cf8db8c27de286335fed0bd37cdbee5eb0"} Nov 21 15:45:00 crc kubenswrapper[4967]: I1121 15:45:00.153131 4967 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-operator-lifecycle-manager/collect-profiles-29395665-rtvmd"] Nov 21 15:45:00 crc kubenswrapper[4967]: I1121 15:45:00.154418 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-rtvmd" Nov 21 15:45:00 crc kubenswrapper[4967]: I1121 15:45:00.158459 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 15:45:00 crc kubenswrapper[4967]: I1121 15:45:00.158161 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 15:45:00 crc kubenswrapper[4967]: I1121 15:45:00.162762 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395665-rtvmd"] Nov 21 15:45:00 crc kubenswrapper[4967]: I1121 15:45:00.293061 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d59513f6-55e3-4bbb-9207-cdd4936124cd-config-volume\") pod \"collect-profiles-29395665-rtvmd\" (UID: \"d59513f6-55e3-4bbb-9207-cdd4936124cd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-rtvmd" Nov 21 15:45:00 crc kubenswrapper[4967]: I1121 15:45:00.293134 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xfrp\" (UniqueName: \"kubernetes.io/projected/d59513f6-55e3-4bbb-9207-cdd4936124cd-kube-api-access-9xfrp\") pod \"collect-profiles-29395665-rtvmd\" (UID: \"d59513f6-55e3-4bbb-9207-cdd4936124cd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-rtvmd" Nov 21 15:45:00 crc kubenswrapper[4967]: I1121 15:45:00.293257 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d59513f6-55e3-4bbb-9207-cdd4936124cd-secret-volume\") pod \"collect-profiles-29395665-rtvmd\" (UID: \"d59513f6-55e3-4bbb-9207-cdd4936124cd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-rtvmd" Nov 21 15:45:00 crc kubenswrapper[4967]: I1121 15:45:00.394906 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d59513f6-55e3-4bbb-9207-cdd4936124cd-secret-volume\") pod \"collect-profiles-29395665-rtvmd\" (UID: \"d59513f6-55e3-4bbb-9207-cdd4936124cd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-rtvmd" Nov 21 15:45:00 crc kubenswrapper[4967]: I1121 15:45:00.394980 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d59513f6-55e3-4bbb-9207-cdd4936124cd-config-volume\") pod \"collect-profiles-29395665-rtvmd\" (UID: \"d59513f6-55e3-4bbb-9207-cdd4936124cd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-rtvmd" Nov 21 15:45:00 crc kubenswrapper[4967]: I1121 15:45:00.395013 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xfrp\" (UniqueName: \"kubernetes.io/projected/d59513f6-55e3-4bbb-9207-cdd4936124cd-kube-api-access-9xfrp\") pod \"collect-profiles-29395665-rtvmd\" (UID: \"d59513f6-55e3-4bbb-9207-cdd4936124cd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-rtvmd" Nov 21 15:45:00 crc kubenswrapper[4967]: I1121 15:45:00.396914 
Nov 21 15:45:00 crc kubenswrapper[4967]: I1121 15:45:00.404113 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d59513f6-55e3-4bbb-9207-cdd4936124cd-secret-volume\") pod \"collect-profiles-29395665-rtvmd\" (UID: \"d59513f6-55e3-4bbb-9207-cdd4936124cd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-rtvmd"
Nov 21 15:45:00 crc kubenswrapper[4967]: I1121 15:45:00.412034 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xfrp\" (UniqueName: \"kubernetes.io/projected/d59513f6-55e3-4bbb-9207-cdd4936124cd-kube-api-access-9xfrp\") pod \"collect-profiles-29395665-rtvmd\" (UID: \"d59513f6-55e3-4bbb-9207-cdd4936124cd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-rtvmd"
Nov 21 15:45:00 crc kubenswrapper[4967]: I1121 15:45:00.516212 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-rtvmd"
Nov 21 15:45:00 crc kubenswrapper[4967]: I1121 15:45:00.939509 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395665-rtvmd"]
Nov 21 15:45:00 crc kubenswrapper[4967]: W1121 15:45:00.947294 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd59513f6_55e3_4bbb_9207_cdd4936124cd.slice/crio-db01f8ba84e2d68a9145fedd1d9587c0954585125439d3af00ba472eb1475bb5 WatchSource:0}: Error finding container db01f8ba84e2d68a9145fedd1d9587c0954585125439d3af00ba472eb1475bb5: Status 404 returned error can't find the container with id db01f8ba84e2d68a9145fedd1d9587c0954585125439d3af00ba472eb1475bb5
Nov 21 15:45:01 crc kubenswrapper[4967]: I1121 15:45:01.104090 4967 generic.go:334] "Generic (PLEG): container finished" podID="3989e919-932b-4153-98cf-5f1ebcc40f89" containerID="5742b77e65e93560a63be0880e50ac8fe037963efaef0d511a712c971c6714b8" exitCode=0
Nov 21 15:45:01 crc kubenswrapper[4967]: I1121 15:45:01.104150 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg" event={"ID":"3989e919-932b-4153-98cf-5f1ebcc40f89","Type":"ContainerDied","Data":"5742b77e65e93560a63be0880e50ac8fe037963efaef0d511a712c971c6714b8"}
Nov 21 15:45:01 crc kubenswrapper[4967]: I1121 15:45:01.105167 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-rtvmd" event={"ID":"d59513f6-55e3-4bbb-9207-cdd4936124cd","Type":"ContainerStarted","Data":"db01f8ba84e2d68a9145fedd1d9587c0954585125439d3af00ba472eb1475bb5"}
Nov 21 15:45:02 crc kubenswrapper[4967]: I1121 15:45:02.112136 4967 generic.go:334] "Generic (PLEG): container finished" podID="d59513f6-55e3-4bbb-9207-cdd4936124cd" containerID="9a3a36ddff348a89aee0880c30634d6cca08a15fa3fc20d5e9eaa5aa0e5f7e0e" exitCode=0
Nov 21 15:45:02 crc kubenswrapper[4967]: I1121 15:45:02.112195 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-rtvmd" event={"ID":"d59513f6-55e3-4bbb-9207-cdd4936124cd","Type":"ContainerDied","Data":"9a3a36ddff348a89aee0880c30634d6cca08a15fa3fc20d5e9eaa5aa0e5f7e0e"}
event={"ID":"d59513f6-55e3-4bbb-9207-cdd4936124cd","Type":"ContainerDied","Data":"9a3a36ddff348a89aee0880c30634d6cca08a15fa3fc20d5e9eaa5aa0e5f7e0e"} Nov 21 15:45:02 crc kubenswrapper[4967]: I1121 15:45:02.328736 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg" Nov 21 15:45:02 crc kubenswrapper[4967]: I1121 15:45:02.431072 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3989e919-932b-4153-98cf-5f1ebcc40f89-bundle\") pod \"3989e919-932b-4153-98cf-5f1ebcc40f89\" (UID: \"3989e919-932b-4153-98cf-5f1ebcc40f89\") " Nov 21 15:45:02 crc kubenswrapper[4967]: I1121 15:45:02.431341 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3989e919-932b-4153-98cf-5f1ebcc40f89-util\") pod \"3989e919-932b-4153-98cf-5f1ebcc40f89\" (UID: \"3989e919-932b-4153-98cf-5f1ebcc40f89\") " Nov 21 15:45:02 crc kubenswrapper[4967]: I1121 15:45:02.431394 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4lvlk\" (UniqueName: \"kubernetes.io/projected/3989e919-932b-4153-98cf-5f1ebcc40f89-kube-api-access-4lvlk\") pod \"3989e919-932b-4153-98cf-5f1ebcc40f89\" (UID: \"3989e919-932b-4153-98cf-5f1ebcc40f89\") " Nov 21 15:45:02 crc kubenswrapper[4967]: I1121 15:45:02.433397 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3989e919-932b-4153-98cf-5f1ebcc40f89-bundle" (OuterVolumeSpecName: "bundle") pod "3989e919-932b-4153-98cf-5f1ebcc40f89" (UID: "3989e919-932b-4153-98cf-5f1ebcc40f89"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:45:02 crc kubenswrapper[4967]: I1121 15:45:02.436611 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3989e919-932b-4153-98cf-5f1ebcc40f89-kube-api-access-4lvlk" (OuterVolumeSpecName: "kube-api-access-4lvlk") pod "3989e919-932b-4153-98cf-5f1ebcc40f89" (UID: "3989e919-932b-4153-98cf-5f1ebcc40f89"). InnerVolumeSpecName "kube-api-access-4lvlk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:45:02 crc kubenswrapper[4967]: I1121 15:45:02.445966 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3989e919-932b-4153-98cf-5f1ebcc40f89-util" (OuterVolumeSpecName: "util") pod "3989e919-932b-4153-98cf-5f1ebcc40f89" (UID: "3989e919-932b-4153-98cf-5f1ebcc40f89"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:45:02 crc kubenswrapper[4967]: I1121 15:45:02.533676 4967 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3989e919-932b-4153-98cf-5f1ebcc40f89-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:02 crc kubenswrapper[4967]: I1121 15:45:02.534020 4967 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3989e919-932b-4153-98cf-5f1ebcc40f89-util\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:02 crc kubenswrapper[4967]: I1121 15:45:02.534037 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4lvlk\" (UniqueName: \"kubernetes.io/projected/3989e919-932b-4153-98cf-5f1ebcc40f89-kube-api-access-4lvlk\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:03 crc kubenswrapper[4967]: I1121 15:45:03.124290 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg" event={"ID":"3989e919-932b-4153-98cf-5f1ebcc40f89","Type":"ContainerDied","Data":"34ed56eeb70e893e1bb6b1acce50a2eff9891fbe5f9655f444f0bb0d70edcfe5"} Nov 21 15:45:03 crc kubenswrapper[4967]: I1121 15:45:03.124354 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="34ed56eeb70e893e1bb6b1acce50a2eff9891fbe5f9655f444f0bb0d70edcfe5" Nov 21 15:45:03 crc kubenswrapper[4967]: I1121 15:45:03.124370 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg" Nov 21 15:45:03 crc kubenswrapper[4967]: I1121 15:45:03.324082 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-rtvmd" Nov 21 15:45:03 crc kubenswrapper[4967]: I1121 15:45:03.448858 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d59513f6-55e3-4bbb-9207-cdd4936124cd-secret-volume\") pod \"d59513f6-55e3-4bbb-9207-cdd4936124cd\" (UID: \"d59513f6-55e3-4bbb-9207-cdd4936124cd\") " Nov 21 15:45:03 crc kubenswrapper[4967]: I1121 15:45:03.448942 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d59513f6-55e3-4bbb-9207-cdd4936124cd-config-volume\") pod \"d59513f6-55e3-4bbb-9207-cdd4936124cd\" (UID: \"d59513f6-55e3-4bbb-9207-cdd4936124cd\") " Nov 21 15:45:03 crc kubenswrapper[4967]: I1121 15:45:03.449176 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfrp\" (UniqueName: \"kubernetes.io/projected/d59513f6-55e3-4bbb-9207-cdd4936124cd-kube-api-access-9xfrp\") pod \"d59513f6-55e3-4bbb-9207-cdd4936124cd\" (UID: \"d59513f6-55e3-4bbb-9207-cdd4936124cd\") " Nov 21 15:45:03 crc kubenswrapper[4967]: I1121 15:45:03.449926 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d59513f6-55e3-4bbb-9207-cdd4936124cd-config-volume" (OuterVolumeSpecName: "config-volume") pod "d59513f6-55e3-4bbb-9207-cdd4936124cd" (UID: "d59513f6-55e3-4bbb-9207-cdd4936124cd"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:45:03 crc kubenswrapper[4967]: I1121 15:45:03.452847 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d59513f6-55e3-4bbb-9207-cdd4936124cd-kube-api-access-9xfrp" (OuterVolumeSpecName: "kube-api-access-9xfrp") pod "d59513f6-55e3-4bbb-9207-cdd4936124cd" (UID: "d59513f6-55e3-4bbb-9207-cdd4936124cd"). InnerVolumeSpecName "kube-api-access-9xfrp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:45:03 crc kubenswrapper[4967]: I1121 15:45:03.453342 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d59513f6-55e3-4bbb-9207-cdd4936124cd-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d59513f6-55e3-4bbb-9207-cdd4936124cd" (UID: "d59513f6-55e3-4bbb-9207-cdd4936124cd"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:45:03 crc kubenswrapper[4967]: I1121 15:45:03.551328 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfrp\" (UniqueName: \"kubernetes.io/projected/d59513f6-55e3-4bbb-9207-cdd4936124cd-kube-api-access-9xfrp\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:03 crc kubenswrapper[4967]: I1121 15:45:03.551367 4967 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d59513f6-55e3-4bbb-9207-cdd4936124cd-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:03 crc kubenswrapper[4967]: I1121 15:45:03.551380 4967 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d59513f6-55e3-4bbb-9207-cdd4936124cd-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:04 crc kubenswrapper[4967]: I1121 15:45:04.131762 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-rtvmd" event={"ID":"d59513f6-55e3-4bbb-9207-cdd4936124cd","Type":"ContainerDied","Data":"db01f8ba84e2d68a9145fedd1d9587c0954585125439d3af00ba472eb1475bb5"} Nov 21 15:45:04 crc kubenswrapper[4967]: I1121 15:45:04.131837 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="db01f8ba84e2d68a9145fedd1d9587c0954585125439d3af00ba472eb1475bb5" Nov 21 15:45:04 crc kubenswrapper[4967]: I1121 15:45:04.131854 4967 util.go:48] "No ready sandbox for pod can be found. 
Nov 21 15:45:08 crc kubenswrapper[4967]: I1121 15:45:08.137218 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-zm492"]
Nov 21 15:45:08 crc kubenswrapper[4967]: I1121 15:45:08.138331 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovn-controller" containerID="cri-o://1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce" gracePeriod=30
Nov 21 15:45:08 crc kubenswrapper[4967]: I1121 15:45:08.138465 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovn-acl-logging" containerID="cri-o://7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381" gracePeriod=30
Nov 21 15:45:08 crc kubenswrapper[4967]: I1121 15:45:08.138443 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="sbdb" containerID="cri-o://2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92" gracePeriod=30
Nov 21 15:45:08 crc kubenswrapper[4967]: I1121 15:45:08.138515 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4" gracePeriod=30
Nov 21 15:45:08 crc kubenswrapper[4967]: I1121 15:45:08.138548 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="northd" containerID="cri-o://e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d" gracePeriod=30
Nov 21 15:45:08 crc kubenswrapper[4967]: I1121 15:45:08.138495 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="kube-rbac-proxy-node" containerID="cri-o://3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b" gracePeriod=30
Nov 21 15:45:08 crc kubenswrapper[4967]: I1121 15:45:08.138456 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="nbdb" containerID="cri-o://4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75" gracePeriod=30
Nov 21 15:45:08 crc kubenswrapper[4967]: I1121 15:45:08.170945 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovnkube-controller" containerID="cri-o://07508a326bea35e0f0e46f7eddb2426f11ab562e5693b62e4effc13bdfcc51f6" gracePeriod=30
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.169143 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovnkube-controller/3.log"
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.171572 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovn-acl-logging/0.log"
path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovn-acl-logging/0.log" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.171984 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovn-controller/0.log" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.172387 4967 generic.go:334] "Generic (PLEG): container finished" podID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerID="07508a326bea35e0f0e46f7eddb2426f11ab562e5693b62e4effc13bdfcc51f6" exitCode=0 Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.172413 4967 generic.go:334] "Generic (PLEG): container finished" podID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerID="2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92" exitCode=0 Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.172422 4967 generic.go:334] "Generic (PLEG): container finished" podID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerID="4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75" exitCode=0 Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.172432 4967 generic.go:334] "Generic (PLEG): container finished" podID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerID="e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d" exitCode=0 Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.172441 4967 generic.go:334] "Generic (PLEG): container finished" podID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerID="7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381" exitCode=143 Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.172451 4967 generic.go:334] "Generic (PLEG): container finished" podID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerID="1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce" exitCode=143 Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.172499 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerDied","Data":"07508a326bea35e0f0e46f7eddb2426f11ab562e5693b62e4effc13bdfcc51f6"} Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.172533 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerDied","Data":"2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92"} Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.172547 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerDied","Data":"4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75"} Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.172558 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerDied","Data":"e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d"} Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.172569 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerDied","Data":"7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381"} Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.172580 4967 
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.172599 4967 scope.go:117] "RemoveContainer" containerID="a8d45d2b6e1bb1f80ce967579185bd793f0c1dae1083720a39ae3ad3863ca14d"
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.174530 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j4dcx_629a5f41-3cd8-4518-a833-2832f4ebe55a/kube-multus/2.log"
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.174864 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j4dcx_629a5f41-3cd8-4518-a833-2832f4ebe55a/kube-multus/1.log"
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.174889 4967 generic.go:334] "Generic (PLEG): container finished" podID="629a5f41-3cd8-4518-a833-2832f4ebe55a" containerID="3d70f335c8c42e0b38f593261c45810a3f14e8876d93fdc04908ec56a235c11b" exitCode=2
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.174911 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j4dcx" event={"ID":"629a5f41-3cd8-4518-a833-2832f4ebe55a","Type":"ContainerDied","Data":"3d70f335c8c42e0b38f593261c45810a3f14e8876d93fdc04908ec56a235c11b"}
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.175414 4967 scope.go:117] "RemoveContainer" containerID="3d70f335c8c42e0b38f593261c45810a3f14e8876d93fdc04908ec56a235c11b"
Nov 21 15:45:09 crc kubenswrapper[4967]: E1121 15:45:09.175702 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-j4dcx_openshift-multus(629a5f41-3cd8-4518-a833-2832f4ebe55a)\"" pod="openshift-multus/multus-j4dcx" podUID="629a5f41-3cd8-4518-a833-2832f4ebe55a"
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.223520 4967 scope.go:117] "RemoveContainer" containerID="691bc6b6db50f421eb9e82f4a113bcef24c35943fe77db1a1c5635a24de9674e"
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.369176 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovn-acl-logging/0.log"
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.369781 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovn-controller/0.log"
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.370164 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-zm492"
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434288 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-cni-bin\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") "
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434395 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-var-lib-cni-networks-ovn-kubernetes\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") "
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434420 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-slash\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") "
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434424 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434454 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-ovn-node-metrics-cert\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") "
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434533 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-node-log\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") "
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434580 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-run-ovn\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") "
Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434581 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434620 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-ovnkube-script-lib\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434644 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-systemd-units\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434663 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-slash" (OuterVolumeSpecName: "host-slash") pod "eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434674 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-ovnkube-config\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434701 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434711 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-env-overrides\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434731 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-node-log" (OuterVolumeSpecName: "node-log") pod "eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434754 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-run-netns\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434757 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "systemd-units". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434799 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-var-lib-openvswitch\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434834 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wvcsc\" (UniqueName: \"kubernetes.io/projected/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-kube-api-access-wvcsc\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434863 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-kubelet\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434899 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-cni-netd\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434930 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-run-openvswitch\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.434963 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-log-socket\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435021 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-etc-openvswitch\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435052 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-run-ovn-kubernetes\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435077 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-run-systemd\") pod \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\" (UID: \"eeb9277d-9a26-4665-a01c-9ed1c379e8dd\") " Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435342 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod 
"eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435373 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435386 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435421 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435475 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-log-socket" (OuterVolumeSpecName: "log-socket") pod "eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435506 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435532 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435539 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "etc-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435511 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435615 4967 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435632 4967 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435644 4967 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435654 4967 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-log-socket\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435664 4967 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435678 4967 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435690 4967 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435704 4967 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435707 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435715 4967 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-slash\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435758 4967 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-node-log\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435774 4967 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435789 4967 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435806 4967 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435820 4967 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435832 4967 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.435865 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.443073 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.453995 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-wk7f7"] Nov 21 15:45:09 crc kubenswrapper[4967]: E1121 15:45:09.454218 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovnkube-controller" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454231 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovnkube-controller" Nov 21 15:45:09 crc kubenswrapper[4967]: E1121 15:45:09.454239 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3989e919-932b-4153-98cf-5f1ebcc40f89" containerName="extract" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454245 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="3989e919-932b-4153-98cf-5f1ebcc40f89" containerName="extract" Nov 21 15:45:09 crc kubenswrapper[4967]: E1121 15:45:09.454257 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="kube-rbac-proxy-ovn-metrics" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454263 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="kube-rbac-proxy-ovn-metrics" Nov 21 15:45:09 crc kubenswrapper[4967]: E1121 15:45:09.454271 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="nbdb" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454278 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="nbdb" Nov 21 15:45:09 crc kubenswrapper[4967]: E1121 15:45:09.454289 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d59513f6-55e3-4bbb-9207-cdd4936124cd" containerName="collect-profiles" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454296 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="d59513f6-55e3-4bbb-9207-cdd4936124cd" containerName="collect-profiles" Nov 21 15:45:09 crc kubenswrapper[4967]: E1121 15:45:09.454322 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovn-controller" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454328 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovn-controller" Nov 21 15:45:09 crc kubenswrapper[4967]: E1121 15:45:09.454338 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovn-acl-logging" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454343 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovn-acl-logging" Nov 21 15:45:09 crc kubenswrapper[4967]: E1121 15:45:09.454352 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovnkube-controller" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454357 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovnkube-controller" Nov 21 15:45:09 crc kubenswrapper[4967]: E1121 15:45:09.454365 4967 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="northd" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454371 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="northd" Nov 21 15:45:09 crc kubenswrapper[4967]: E1121 15:45:09.454383 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3989e919-932b-4153-98cf-5f1ebcc40f89" containerName="pull" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454389 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="3989e919-932b-4153-98cf-5f1ebcc40f89" containerName="pull" Nov 21 15:45:09 crc kubenswrapper[4967]: E1121 15:45:09.454394 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovnkube-controller" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454400 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovnkube-controller" Nov 21 15:45:09 crc kubenswrapper[4967]: E1121 15:45:09.454406 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="kube-rbac-proxy-node" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454412 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="kube-rbac-proxy-node" Nov 21 15:45:09 crc kubenswrapper[4967]: E1121 15:45:09.454418 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="sbdb" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454423 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="sbdb" Nov 21 15:45:09 crc kubenswrapper[4967]: E1121 15:45:09.454430 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3989e919-932b-4153-98cf-5f1ebcc40f89" containerName="util" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454436 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="3989e919-932b-4153-98cf-5f1ebcc40f89" containerName="util" Nov 21 15:45:09 crc kubenswrapper[4967]: E1121 15:45:09.454443 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="kubecfg-setup" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454448 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="kubecfg-setup" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454542 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovnkube-controller" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454550 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovnkube-controller" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454589 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="3989e919-932b-4153-98cf-5f1ebcc40f89" containerName="extract" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454599 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovn-controller" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454608 4967 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovnkube-controller" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454618 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovn-acl-logging" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454626 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="kube-rbac-proxy-ovn-metrics" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454632 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="d59513f6-55e3-4bbb-9207-cdd4936124cd" containerName="collect-profiles" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454640 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="nbdb" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454649 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="northd" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454655 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="sbdb" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454662 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="kube-rbac-proxy-node" Nov 21 15:45:09 crc kubenswrapper[4967]: E1121 15:45:09.454755 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovnkube-controller" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454762 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovnkube-controller" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454861 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovnkube-controller" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454870 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovnkube-controller" Nov 21 15:45:09 crc kubenswrapper[4967]: E1121 15:45:09.454974 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovnkube-controller" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.454980 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerName="ovnkube-controller" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.456604 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.460807 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-kube-api-access-wvcsc" (OuterVolumeSpecName: "kube-api-access-wvcsc") pod "eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "kube-api-access-wvcsc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.464990 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "eeb9277d-9a26-4665-a01c-9ed1c379e8dd" (UID: "eeb9277d-9a26-4665-a01c-9ed1c379e8dd"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.536657 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-log-socket\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.536702 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-etc-openvswitch\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.536725 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-systemd-units\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.536742 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-run-ovn-kubernetes\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.536873 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-run-openvswitch\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.536926 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-node-log\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.536961 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kvxd\" (UniqueName: \"kubernetes.io/projected/6578ac61-df72-4440-9f4e-2165a51d52fa-kube-api-access-2kvxd\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.536993 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-cni-bin\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.537036 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6578ac61-df72-4440-9f4e-2165a51d52fa-ovn-node-metrics-cert\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.537074 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-run-ovn\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.537093 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-run-netns\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.537117 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6578ac61-df72-4440-9f4e-2165a51d52fa-ovnkube-script-lib\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.537140 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.537169 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-cni-netd\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.537218 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6578ac61-df72-4440-9f4e-2165a51d52fa-env-overrides\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.537247 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-kubelet\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.537266 4967 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-run-systemd\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.537331 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-slash\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.537364 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6578ac61-df72-4440-9f4e-2165a51d52fa-ovnkube-config\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.537386 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-var-lib-openvswitch\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.537452 4967 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.537467 4967 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.537479 4967 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.537491 4967 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.537502 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvcsc\" (UniqueName: \"kubernetes.io/projected/eeb9277d-9a26-4665-a01c-9ed1c379e8dd-kube-api-access-wvcsc\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.638824 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6578ac61-df72-4440-9f4e-2165a51d52fa-ovn-node-metrics-cert\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.638881 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-run-ovn\") pod 
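The reconciler_common records trace the volume manager's reconcile loop: volumes for the old pod UID (eeb9277d-...) are torn down and reported detached while the same named volumes are registered and mounted under the new pod UID (6578ac61-...). A rough Go sketch of that desired-state-versus-actual-state loop, with simplified stand-in types rather than the real volume manager's:

package main

import "fmt"

type volume struct{ name, podUID string }

// reconcile unmounts volumes that are no longer desired and mounts volumes
// that are desired but not yet actual, which is why the log shows the same
// volume names disappearing under one pod UID and reappearing under another.
func reconcile(desired, actual map[volume]bool) {
	for v := range actual {
		if !desired[v] {
			fmt.Printf("UnmountVolume.TearDown succeeded for volume %q pod %q\n", v.name, v.podUID)
			delete(actual, v)
		}
	}
	for v := range desired {
		if !actual[v] {
			fmt.Printf("MountVolume.SetUp succeeded for volume %q pod %q\n", v.name, v.podUID)
			actual[v] = true
		}
	}
}

func main() {
	oldPod, newPod := "eeb9277d-9a26-4665-a01c-9ed1c379e8dd", "6578ac61-df72-4440-9f4e-2165a51d52fa"
	actual := map[volume]bool{{"run-systemd", oldPod}: true}
	desired := map[volume]bool{{"run-systemd", newPod}: true}
	reconcile(desired, actual)
}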
\"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.638899 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-run-netns\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.638915 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6578ac61-df72-4440-9f4e-2165a51d52fa-ovnkube-script-lib\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.638943 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.638962 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-cni-netd\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.638987 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6578ac61-df72-4440-9f4e-2165a51d52fa-env-overrides\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.639001 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-run-systemd\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.639035 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-kubelet\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.639060 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-slash\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.639087 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6578ac61-df72-4440-9f4e-2165a51d52fa-ovnkube-config\") pod \"ovnkube-node-wk7f7\" (UID: 
\"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.639104 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-var-lib-openvswitch\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.639163 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-log-socket\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.639180 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-etc-openvswitch\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.639221 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-systemd-units\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.639242 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-run-ovn-kubernetes\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.639262 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-run-openvswitch\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.639293 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-node-log\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.639341 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kvxd\" (UniqueName: \"kubernetes.io/projected/6578ac61-df72-4440-9f4e-2165a51d52fa-kube-api-access-2kvxd\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.639360 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-cni-bin\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.639431 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-cni-bin\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.641230 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-var-lib-openvswitch\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.641267 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-run-ovn\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.641290 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-run-netns\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.641826 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6578ac61-df72-4440-9f4e-2165a51d52fa-ovnkube-script-lib\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.642025 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.642055 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-cni-netd\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.642348 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6578ac61-df72-4440-9f4e-2165a51d52fa-env-overrides\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.642391 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-run-systemd\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.642418 4967 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-kubelet\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.642440 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-slash\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.642959 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6578ac61-df72-4440-9f4e-2165a51d52fa-ovnkube-config\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.643607 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-log-socket\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.643635 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-etc-openvswitch\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.643660 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-host-run-ovn-kubernetes\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.643811 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-systemd-units\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.643842 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-node-log\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.643866 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6578ac61-df72-4440-9f4e-2165a51d52fa-run-openvswitch\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.644503 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/6578ac61-df72-4440-9f4e-2165a51d52fa-ovn-node-metrics-cert\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.669697 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kvxd\" (UniqueName: \"kubernetes.io/projected/6578ac61-df72-4440-9f4e-2165a51d52fa-kube-api-access-2kvxd\") pod \"ovnkube-node-wk7f7\" (UID: \"6578ac61-df72-4440-9f4e-2165a51d52fa\") " pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:09 crc kubenswrapper[4967]: I1121 15:45:09.807784 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.181859 4967 generic.go:334] "Generic (PLEG): container finished" podID="6578ac61-df72-4440-9f4e-2165a51d52fa" containerID="a58fbb2365f5b9772a7d77d6e5d016bc8d21645b9803c23f704552f64707f79f" exitCode=0 Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.181958 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" event={"ID":"6578ac61-df72-4440-9f4e-2165a51d52fa","Type":"ContainerDied","Data":"a58fbb2365f5b9772a7d77d6e5d016bc8d21645b9803c23f704552f64707f79f"} Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.182263 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" event={"ID":"6578ac61-df72-4440-9f4e-2165a51d52fa","Type":"ContainerStarted","Data":"0c9a3aa9cbb5b5becb7479a51062a18121a016ca107c1b1e9cedc2a6d004b3e0"} Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.186985 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovn-acl-logging/0.log" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.187388 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-zm492_eeb9277d-9a26-4665-a01c-9ed1c379e8dd/ovn-controller/0.log" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.187713 4967 generic.go:334] "Generic (PLEG): container finished" podID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerID="159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4" exitCode=0 Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.187741 4967 generic.go:334] "Generic (PLEG): container finished" podID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" containerID="3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b" exitCode=0 Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.187799 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerDied","Data":"159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4"} Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.187828 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerDied","Data":"3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b"} Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.187846 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" 
event={"ID":"eeb9277d-9a26-4665-a01c-9ed1c379e8dd","Type":"ContainerDied","Data":"fa379cf57929b4b3e288f4fa9571c203fd83769bbd890c72ac259ac7dcbbfdf2"} Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.187867 4967 scope.go:117] "RemoveContainer" containerID="07508a326bea35e0f0e46f7eddb2426f11ab562e5693b62e4effc13bdfcc51f6" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.187978 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-zm492" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.196740 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j4dcx_629a5f41-3cd8-4518-a833-2832f4ebe55a/kube-multus/2.log" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.216946 4967 scope.go:117] "RemoveContainer" containerID="2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.232775 4967 scope.go:117] "RemoveContainer" containerID="4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.261524 4967 scope.go:117] "RemoveContainer" containerID="e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.271805 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-zm492"] Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.292059 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-zm492"] Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.298470 4967 scope.go:117] "RemoveContainer" containerID="159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.340643 4967 scope.go:117] "RemoveContainer" containerID="3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.391896 4967 scope.go:117] "RemoveContainer" containerID="7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.470480 4967 scope.go:117] "RemoveContainer" containerID="1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.496020 4967 scope.go:117] "RemoveContainer" containerID="beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.517935 4967 scope.go:117] "RemoveContainer" containerID="07508a326bea35e0f0e46f7eddb2426f11ab562e5693b62e4effc13bdfcc51f6" Nov 21 15:45:10 crc kubenswrapper[4967]: E1121 15:45:10.521708 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07508a326bea35e0f0e46f7eddb2426f11ab562e5693b62e4effc13bdfcc51f6\": container with ID starting with 07508a326bea35e0f0e46f7eddb2426f11ab562e5693b62e4effc13bdfcc51f6 not found: ID does not exist" containerID="07508a326bea35e0f0e46f7eddb2426f11ab562e5693b62e4effc13bdfcc51f6" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.521753 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07508a326bea35e0f0e46f7eddb2426f11ab562e5693b62e4effc13bdfcc51f6"} err="failed to get container status \"07508a326bea35e0f0e46f7eddb2426f11ab562e5693b62e4effc13bdfcc51f6\": rpc error: code = NotFound desc = could not find container 
\"07508a326bea35e0f0e46f7eddb2426f11ab562e5693b62e4effc13bdfcc51f6\": container with ID starting with 07508a326bea35e0f0e46f7eddb2426f11ab562e5693b62e4effc13bdfcc51f6 not found: ID does not exist" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.521782 4967 scope.go:117] "RemoveContainer" containerID="2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92" Nov 21 15:45:10 crc kubenswrapper[4967]: E1121 15:45:10.524393 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\": container with ID starting with 2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92 not found: ID does not exist" containerID="2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.524430 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92"} err="failed to get container status \"2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\": rpc error: code = NotFound desc = could not find container \"2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\": container with ID starting with 2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92 not found: ID does not exist" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.524464 4967 scope.go:117] "RemoveContainer" containerID="4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75" Nov 21 15:45:10 crc kubenswrapper[4967]: E1121 15:45:10.525960 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\": container with ID starting with 4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75 not found: ID does not exist" containerID="4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.526006 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75"} err="failed to get container status \"4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\": rpc error: code = NotFound desc = could not find container \"4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\": container with ID starting with 4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75 not found: ID does not exist" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.526045 4967 scope.go:117] "RemoveContainer" containerID="e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d" Nov 21 15:45:10 crc kubenswrapper[4967]: E1121 15:45:10.526557 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\": container with ID starting with e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d not found: ID does not exist" containerID="e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.526594 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d"} 
err="failed to get container status \"e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\": rpc error: code = NotFound desc = could not find container \"e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\": container with ID starting with e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d not found: ID does not exist" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.526617 4967 scope.go:117] "RemoveContainer" containerID="159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4" Nov 21 15:45:10 crc kubenswrapper[4967]: E1121 15:45:10.528929 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\": container with ID starting with 159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4 not found: ID does not exist" containerID="159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.528962 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4"} err="failed to get container status \"159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\": rpc error: code = NotFound desc = could not find container \"159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\": container with ID starting with 159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4 not found: ID does not exist" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.528985 4967 scope.go:117] "RemoveContainer" containerID="3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b" Nov 21 15:45:10 crc kubenswrapper[4967]: E1121 15:45:10.529192 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\": container with ID starting with 3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b not found: ID does not exist" containerID="3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.529219 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b"} err="failed to get container status \"3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\": rpc error: code = NotFound desc = could not find container \"3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\": container with ID starting with 3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b not found: ID does not exist" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.529236 4967 scope.go:117] "RemoveContainer" containerID="7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381" Nov 21 15:45:10 crc kubenswrapper[4967]: E1121 15:45:10.529773 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\": container with ID starting with 7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381 not found: ID does not exist" containerID="7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.529827 4967 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381"} err="failed to get container status \"7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\": rpc error: code = NotFound desc = could not find container \"7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\": container with ID starting with 7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381 not found: ID does not exist" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.529859 4967 scope.go:117] "RemoveContainer" containerID="1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce" Nov 21 15:45:10 crc kubenswrapper[4967]: E1121 15:45:10.530196 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\": container with ID starting with 1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce not found: ID does not exist" containerID="1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.530230 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce"} err="failed to get container status \"1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\": rpc error: code = NotFound desc = could not find container \"1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\": container with ID starting with 1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce not found: ID does not exist" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.530251 4967 scope.go:117] "RemoveContainer" containerID="beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020" Nov 21 15:45:10 crc kubenswrapper[4967]: E1121 15:45:10.530569 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\": container with ID starting with beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020 not found: ID does not exist" containerID="beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.530598 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020"} err="failed to get container status \"beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\": rpc error: code = NotFound desc = could not find container \"beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\": container with ID starting with beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020 not found: ID does not exist" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.530617 4967 scope.go:117] "RemoveContainer" containerID="07508a326bea35e0f0e46f7eddb2426f11ab562e5693b62e4effc13bdfcc51f6" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.530862 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07508a326bea35e0f0e46f7eddb2426f11ab562e5693b62e4effc13bdfcc51f6"} err="failed to get container status \"07508a326bea35e0f0e46f7eddb2426f11ab562e5693b62e4effc13bdfcc51f6\": rpc error: code = NotFound desc = could 
not find container \"07508a326bea35e0f0e46f7eddb2426f11ab562e5693b62e4effc13bdfcc51f6\": container with ID starting with 07508a326bea35e0f0e46f7eddb2426f11ab562e5693b62e4effc13bdfcc51f6 not found: ID does not exist" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.530888 4967 scope.go:117] "RemoveContainer" containerID="2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.531237 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92"} err="failed to get container status \"2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\": rpc error: code = NotFound desc = could not find container \"2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92\": container with ID starting with 2b2bb912e09b124cd20a56ddef7efaaa1c6c5cd40124ce50fbc7119294caed92 not found: ID does not exist" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.531270 4967 scope.go:117] "RemoveContainer" containerID="4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.531606 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75"} err="failed to get container status \"4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\": rpc error: code = NotFound desc = could not find container \"4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75\": container with ID starting with 4e0dd236f0e855c6d8fea96323e805b97c1e7cec69d82cf123ab926da108ee75 not found: ID does not exist" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.531634 4967 scope.go:117] "RemoveContainer" containerID="e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.532028 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d"} err="failed to get container status \"e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\": rpc error: code = NotFound desc = could not find container \"e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d\": container with ID starting with e21ad55b6c0943a3331674a82edfb484f23eaa46e227c7fb87ac9e93b54cd56d not found: ID does not exist" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.532053 4967 scope.go:117] "RemoveContainer" containerID="159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.532246 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4"} err="failed to get container status \"159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\": rpc error: code = NotFound desc = could not find container \"159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4\": container with ID starting with 159ab3505144f1786184c58bb052beeded95a30d965c433038ea32e0bb7049e4 not found: ID does not exist" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.532272 4967 scope.go:117] "RemoveContainer" containerID="3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.532552 4967 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b"} err="failed to get container status \"3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\": rpc error: code = NotFound desc = could not find container \"3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b\": container with ID starting with 3576f9107b8c551dc90649c2bd667b60fb8e5abd2629ca9f3fdfbe22a589666b not found: ID does not exist" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.532575 4967 scope.go:117] "RemoveContainer" containerID="7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.532806 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381"} err="failed to get container status \"7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\": rpc error: code = NotFound desc = could not find container \"7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381\": container with ID starting with 7f02beb49c57b148ad62e1de2ff34fa7b9daf1ef6c57e04514657351a65f9381 not found: ID does not exist" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.532828 4967 scope.go:117] "RemoveContainer" containerID="1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.533019 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce"} err="failed to get container status \"1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\": rpc error: code = NotFound desc = could not find container \"1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce\": container with ID starting with 1b3b0c3e45cf3c658c7e02c139ff1cbfd4dea60b6d175d093aa8c2e89d398dce not found: ID does not exist" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.533040 4967 scope.go:117] "RemoveContainer" containerID="beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.533229 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020"} err="failed to get container status \"beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\": rpc error: code = NotFound desc = could not find container \"beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020\": container with ID starting with beade2830d95bdf4391a9c86678586bfa227d7d7ece8a186165d899e2ee3d020 not found: ID does not exist" Nov 21 15:45:10 crc kubenswrapper[4967]: I1121 15:45:10.549133 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eeb9277d-9a26-4665-a01c-9ed1c379e8dd" path="/var/lib/kubelet/pods/eeb9277d-9a26-4665-a01c-9ed1c379e8dd/volumes" Nov 21 15:45:11 crc kubenswrapper[4967]: I1121 15:45:11.209913 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" event={"ID":"6578ac61-df72-4440-9f4e-2165a51d52fa","Type":"ContainerStarted","Data":"1e776c1d71e97c13b485a89c228d5d30586806621390b06d147387745c70a760"} Nov 21 15:45:11 crc kubenswrapper[4967]: I1121 15:45:11.209960 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
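The repeated "ContainerStatus from runtime service failed ... NotFound" errors after each RemoveContainer are benign: the containers were already gone, and cleanup simply confirms the desired end state. A sketch of treating gRPC NotFound as success during removal, using the standard google.golang.org/grpc status package (the remove callback stands in for the CRI call and assumes the grpc module is available):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// deleteContainer tolerates NotFound: if the runtime no longer knows the ID,
// the container is already removed, so the operation is effectively done.
func deleteContainer(remove func(id string) error, id string) error {
	if err := remove(id); err != nil {
		if status.Code(err) == codes.NotFound {
			fmt.Printf("container %s already gone, treating as removed\n", id)
			return nil
		}
		return err
	}
	return nil
}

func main() {
	notFound := func(id string) error {
		return status.Errorf(codes.NotFound, "could not find container %q", id)
	}
	_ = deleteContainer(notFound, "07508a326bea35e0f0e46f7eddb2426f11ab562e5693b62e4effc13bdfcc51f6")
}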
pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" event={"ID":"6578ac61-df72-4440-9f4e-2165a51d52fa","Type":"ContainerStarted","Data":"b24f0871b86891a54480796b2077ad3b7f013f78f6cad0bc370a49ff4d20db91"} Nov 21 15:45:11 crc kubenswrapper[4967]: I1121 15:45:11.209974 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" event={"ID":"6578ac61-df72-4440-9f4e-2165a51d52fa","Type":"ContainerStarted","Data":"af3179fc106dbd812fc7622c62cb8980af3e17cdcfff35e97c16900851b206dd"} Nov 21 15:45:11 crc kubenswrapper[4967]: I1121 15:45:11.209985 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" event={"ID":"6578ac61-df72-4440-9f4e-2165a51d52fa","Type":"ContainerStarted","Data":"5d845d2b0a63953f0868bcfa4e550aaf75d49ac5cf72c2caaca64014f62433f6"} Nov 21 15:45:11 crc kubenswrapper[4967]: I1121 15:45:11.209996 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" event={"ID":"6578ac61-df72-4440-9f4e-2165a51d52fa","Type":"ContainerStarted","Data":"ed074a52fcedd3e4f2f6bc15f1885977de6729f397491f1547c6731d9a708c82"} Nov 21 15:45:12 crc kubenswrapper[4967]: I1121 15:45:12.220066 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" event={"ID":"6578ac61-df72-4440-9f4e-2165a51d52fa","Type":"ContainerStarted","Data":"d5ae195a5306933fcb71b3f5dd2d405b3f13990747da9d5494679ad75a26da3b"} Nov 21 15:45:14 crc kubenswrapper[4967]: I1121 15:45:14.234935 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" event={"ID":"6578ac61-df72-4440-9f4e-2165a51d52fa","Type":"ContainerStarted","Data":"ab72bb08ed26cbf9545decae56bca7011be81ab86bbfca85f27abc8c4162ec54"} Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.120246 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49"] Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.122680 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.138037 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.140626 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.142900 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-bvf64" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.224515 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vjnhx\" (UniqueName: \"kubernetes.io/projected/f6e9b133-82e0-4185-9fa4-7007ffe75f5d-kube-api-access-vjnhx\") pod \"obo-prometheus-operator-668cf9dfbb-wqt49\" (UID: \"f6e9b133-82e0-4185-9fa4-7007ffe75f5d\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.249080 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp"] Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.250128 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.252286 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-l4ps5" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.253047 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.272291 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj"] Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.274512 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.326612 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bf8c40bc-4733-49aa-b2e2-9297e0b7bd30-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-974477bf4-xtlzj\" (UID: \"bf8c40bc-4733-49aa-b2e2-9297e0b7bd30\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.326700 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vjnhx\" (UniqueName: \"kubernetes.io/projected/f6e9b133-82e0-4185-9fa4-7007ffe75f5d-kube-api-access-vjnhx\") pod \"obo-prometheus-operator-668cf9dfbb-wqt49\" (UID: \"f6e9b133-82e0-4185-9fa4-7007ffe75f5d\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.326747 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/aec55e5b-3112-46e8-bc8b-c643e8fca0fe-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-974477bf4-9z6rp\" (UID: \"aec55e5b-3112-46e8-bc8b-c643e8fca0fe\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.326766 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bf8c40bc-4733-49aa-b2e2-9297e0b7bd30-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-974477bf4-xtlzj\" (UID: \"bf8c40bc-4733-49aa-b2e2-9297e0b7bd30\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.326784 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/aec55e5b-3112-46e8-bc8b-c643e8fca0fe-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-974477bf4-9z6rp\" (UID: \"aec55e5b-3112-46e8-bc8b-c643e8fca0fe\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.353554 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vjnhx\" (UniqueName: \"kubernetes.io/projected/f6e9b133-82e0-4185-9fa4-7007ffe75f5d-kube-api-access-vjnhx\") pod 
\"obo-prometheus-operator-668cf9dfbb-wqt49\" (UID: \"f6e9b133-82e0-4185-9fa4-7007ffe75f5d\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.429060 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bf8c40bc-4733-49aa-b2e2-9297e0b7bd30-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-974477bf4-xtlzj\" (UID: \"bf8c40bc-4733-49aa-b2e2-9297e0b7bd30\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.429209 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/aec55e5b-3112-46e8-bc8b-c643e8fca0fe-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-974477bf4-9z6rp\" (UID: \"aec55e5b-3112-46e8-bc8b-c643e8fca0fe\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.429239 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bf8c40bc-4733-49aa-b2e2-9297e0b7bd30-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-974477bf4-xtlzj\" (UID: \"bf8c40bc-4733-49aa-b2e2-9297e0b7bd30\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.429261 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/aec55e5b-3112-46e8-bc8b-c643e8fca0fe-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-974477bf4-9z6rp\" (UID: \"aec55e5b-3112-46e8-bc8b-c643e8fca0fe\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.436245 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/aec55e5b-3112-46e8-bc8b-c643e8fca0fe-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-974477bf4-9z6rp\" (UID: \"aec55e5b-3112-46e8-bc8b-c643e8fca0fe\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.440938 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bf8c40bc-4733-49aa-b2e2-9297e0b7bd30-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-974477bf4-xtlzj\" (UID: \"bf8c40bc-4733-49aa-b2e2-9297e0b7bd30\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.441810 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/aec55e5b-3112-46e8-bc8b-c643e8fca0fe-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-974477bf4-9z6rp\" (UID: \"aec55e5b-3112-46e8-bc8b-c643e8fca0fe\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.457070 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-pr4jn"] Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.459074 4967 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.462933 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bf8c40bc-4733-49aa-b2e2-9297e0b7bd30-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-974477bf4-xtlzj\" (UID: \"bf8c40bc-4733-49aa-b2e2-9297e0b7bd30\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.463208 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-jx4sf" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.463570 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.469025 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.514533 4967 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-wqt49_openshift-operators_f6e9b133-82e0-4185-9fa4-7007ffe75f5d_0(324bc77ba73ee525ab714056d2a6e80dab70e424ac06b29c519229ca4334b5f6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.514977 4967 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-wqt49_openshift-operators_f6e9b133-82e0-4185-9fa4-7007ffe75f5d_0(324bc77ba73ee525ab714056d2a6e80dab70e424ac06b29c519229ca4334b5f6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.515010 4967 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-wqt49_openshift-operators_f6e9b133-82e0-4185-9fa4-7007ffe75f5d_0(324bc77ba73ee525ab714056d2a6e80dab70e424ac06b29c519229ca4334b5f6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.515062 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-wqt49_openshift-operators(f6e9b133-82e0-4185-9fa4-7007ffe75f5d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-wqt49_openshift-operators(f6e9b133-82e0-4185-9fa4-7007ffe75f5d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-wqt49_openshift-operators_f6e9b133-82e0-4185-9fa4-7007ffe75f5d_0(324bc77ba73ee525ab714056d2a6e80dab70e424ac06b29c519229ca4334b5f6): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49" podUID="f6e9b133-82e0-4185-9fa4-7007ffe75f5d" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.530457 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnpqt\" (UniqueName: \"kubernetes.io/projected/0070e88a-ae9a-4436-ab1f-4e8e4e2ba557-kube-api-access-cnpqt\") pod \"observability-operator-d8bb48f5d-pr4jn\" (UID: \"0070e88a-ae9a-4436-ab1f-4e8e4e2ba557\") " pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.530535 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/0070e88a-ae9a-4436-ab1f-4e8e4e2ba557-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-pr4jn\" (UID: \"0070e88a-ae9a-4436-ab1f-4e8e4e2ba557\") " pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.571570 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.579213 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-6tvkc"] Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.579918 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-6tvkc" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.581983 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-882mr" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.592052 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.617964 4967 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-9z6rp_openshift-operators_aec55e5b-3112-46e8-bc8b-c643e8fca0fe_0(eefdcd50f69ba41ee6825468ad0bcf2c84ac502fd049e9943164a7378d184e1b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.618042 4967 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-9z6rp_openshift-operators_aec55e5b-3112-46e8-bc8b-c643e8fca0fe_0(eefdcd50f69ba41ee6825468ad0bcf2c84ac502fd049e9943164a7378d184e1b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.618068 4967 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-9z6rp_openshift-operators_aec55e5b-3112-46e8-bc8b-c643e8fca0fe_0(eefdcd50f69ba41ee6825468ad0bcf2c84ac502fd049e9943164a7378d184e1b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.618119 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-974477bf4-9z6rp_openshift-operators(aec55e5b-3112-46e8-bc8b-c643e8fca0fe)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-974477bf4-9z6rp_openshift-operators(aec55e5b-3112-46e8-bc8b-c643e8fca0fe)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-9z6rp_openshift-operators_aec55e5b-3112-46e8-bc8b-c643e8fca0fe_0(eefdcd50f69ba41ee6825468ad0bcf2c84ac502fd049e9943164a7378d184e1b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" podUID="aec55e5b-3112-46e8-bc8b-c643e8fca0fe" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.634081 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77dhm\" (UniqueName: \"kubernetes.io/projected/ec826dcc-83f7-4138-b93c-25603f94599a-kube-api-access-77dhm\") pod \"perses-operator-5446b9c989-6tvkc\" (UID: \"ec826dcc-83f7-4138-b93c-25603f94599a\") " pod="openshift-operators/perses-operator-5446b9c989-6tvkc" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.634215 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/ec826dcc-83f7-4138-b93c-25603f94599a-openshift-service-ca\") pod \"perses-operator-5446b9c989-6tvkc\" (UID: \"ec826dcc-83f7-4138-b93c-25603f94599a\") " pod="openshift-operators/perses-operator-5446b9c989-6tvkc" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.634254 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnpqt\" (UniqueName: \"kubernetes.io/projected/0070e88a-ae9a-4436-ab1f-4e8e4e2ba557-kube-api-access-cnpqt\") pod \"observability-operator-d8bb48f5d-pr4jn\" (UID: \"0070e88a-ae9a-4436-ab1f-4e8e4e2ba557\") " pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.634279 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/0070e88a-ae9a-4436-ab1f-4e8e4e2ba557-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-pr4jn\" (UID: \"0070e88a-ae9a-4436-ab1f-4e8e4e2ba557\") " pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.639148 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/0070e88a-ae9a-4436-ab1f-4e8e4e2ba557-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-pr4jn\" (UID: \"0070e88a-ae9a-4436-ab1f-4e8e4e2ba557\") " pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.657953 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnpqt\" (UniqueName: \"kubernetes.io/projected/0070e88a-ae9a-4436-ab1f-4e8e4e2ba557-kube-api-access-cnpqt\") pod \"observability-operator-d8bb48f5d-pr4jn\" (UID: \"0070e88a-ae9a-4436-ab1f-4e8e4e2ba557\") " pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.669000 4967 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-xtlzj_openshift-operators_bf8c40bc-4733-49aa-b2e2-9297e0b7bd30_0(013681c04e44770472338dee8064a851353984be90d22593c364c273a741e680): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.669093 4967 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-xtlzj_openshift-operators_bf8c40bc-4733-49aa-b2e2-9297e0b7bd30_0(013681c04e44770472338dee8064a851353984be90d22593c364c273a741e680): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.669118 4967 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-xtlzj_openshift-operators_bf8c40bc-4733-49aa-b2e2-9297e0b7bd30_0(013681c04e44770472338dee8064a851353984be90d22593c364c273a741e680): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.669178 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-974477bf4-xtlzj_openshift-operators(bf8c40bc-4733-49aa-b2e2-9297e0b7bd30)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-974477bf4-xtlzj_openshift-operators(bf8c40bc-4733-49aa-b2e2-9297e0b7bd30)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-xtlzj_openshift-operators_bf8c40bc-4733-49aa-b2e2-9297e0b7bd30_0(013681c04e44770472338dee8064a851353984be90d22593c364c273a741e680): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj" podUID="bf8c40bc-4733-49aa-b2e2-9297e0b7bd30" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.736723 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/ec826dcc-83f7-4138-b93c-25603f94599a-openshift-service-ca\") pod \"perses-operator-5446b9c989-6tvkc\" (UID: \"ec826dcc-83f7-4138-b93c-25603f94599a\") " pod="openshift-operators/perses-operator-5446b9c989-6tvkc" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.737069 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77dhm\" (UniqueName: \"kubernetes.io/projected/ec826dcc-83f7-4138-b93c-25603f94599a-kube-api-access-77dhm\") pod \"perses-operator-5446b9c989-6tvkc\" (UID: \"ec826dcc-83f7-4138-b93c-25603f94599a\") " pod="openshift-operators/perses-operator-5446b9c989-6tvkc" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.737857 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/ec826dcc-83f7-4138-b93c-25603f94599a-openshift-service-ca\") pod \"perses-operator-5446b9c989-6tvkc\" (UID: \"ec826dcc-83f7-4138-b93c-25603f94599a\") " pod="openshift-operators/perses-operator-5446b9c989-6tvkc" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.760569 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77dhm\" (UniqueName: \"kubernetes.io/projected/ec826dcc-83f7-4138-b93c-25603f94599a-kube-api-access-77dhm\") pod \"perses-operator-5446b9c989-6tvkc\" (UID: \"ec826dcc-83f7-4138-b93c-25603f94599a\") " pod="openshift-operators/perses-operator-5446b9c989-6tvkc" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.843262 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.873392 4967 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pr4jn_openshift-operators_0070e88a-ae9a-4436-ab1f-4e8e4e2ba557_0(44e15763fc79cd4361ad8817d54c123a0b5278856f4ac892c6442a6c07088448): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.873486 4967 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pr4jn_openshift-operators_0070e88a-ae9a-4436-ab1f-4e8e4e2ba557_0(44e15763fc79cd4361ad8817d54c123a0b5278856f4ac892c6442a6c07088448): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.873512 4967 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pr4jn_openshift-operators_0070e88a-ae9a-4436-ab1f-4e8e4e2ba557_0(44e15763fc79cd4361ad8817d54c123a0b5278856f4ac892c6442a6c07088448): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.873578 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-pr4jn_openshift-operators(0070e88a-ae9a-4436-ab1f-4e8e4e2ba557)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-pr4jn_openshift-operators(0070e88a-ae9a-4436-ab1f-4e8e4e2ba557)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pr4jn_openshift-operators_0070e88a-ae9a-4436-ab1f-4e8e4e2ba557_0(44e15763fc79cd4361ad8817d54c123a0b5278856f4ac892c6442a6c07088448): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" podUID="0070e88a-ae9a-4436-ab1f-4e8e4e2ba557" Nov 21 15:45:15 crc kubenswrapper[4967]: I1121 15:45:15.901959 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-6tvkc" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.926903 4967 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-6tvkc_openshift-operators_ec826dcc-83f7-4138-b93c-25603f94599a_0(e8bf2c273ae5b9e71807dca099b7ad1110533607c5ef7702200619bce8959f4b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.926965 4967 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-6tvkc_openshift-operators_ec826dcc-83f7-4138-b93c-25603f94599a_0(e8bf2c273ae5b9e71807dca099b7ad1110533607c5ef7702200619bce8959f4b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-6tvkc" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.926985 4967 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-6tvkc_openshift-operators_ec826dcc-83f7-4138-b93c-25603f94599a_0(e8bf2c273ae5b9e71807dca099b7ad1110533607c5ef7702200619bce8959f4b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-6tvkc" Nov 21 15:45:15 crc kubenswrapper[4967]: E1121 15:45:15.927061 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-6tvkc_openshift-operators(ec826dcc-83f7-4138-b93c-25603f94599a)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-6tvkc_openshift-operators(ec826dcc-83f7-4138-b93c-25603f94599a)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-6tvkc_openshift-operators_ec826dcc-83f7-4138-b93c-25603f94599a_0(e8bf2c273ae5b9e71807dca099b7ad1110533607c5ef7702200619bce8959f4b): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-6tvkc" podUID="ec826dcc-83f7-4138-b93c-25603f94599a" Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.251549 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" event={"ID":"6578ac61-df72-4440-9f4e-2165a51d52fa","Type":"ContainerStarted","Data":"f9331cf6cea5f15fe14dbcc75fa6ce45f52ac9aae60a4b5e405dd17356fd58b5"} Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.251973 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.251991 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.277545 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" podStartSLOduration=7.277524839 podStartE2EDuration="7.277524839s" podCreationTimestamp="2025-11-21 15:45:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:45:16.277166129 +0000 UTC m=+604.535687157" watchObservedRunningTime="2025-11-21 15:45:16.277524839 +0000 UTC m=+604.536045847" Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.289822 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.294565 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7" Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.522199 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.522294 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.522417 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.523276 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"02dde30aac8f6d27fad1d64a7cac03cf4e5c604ca91f456020aea82d85f77a2a"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.523359 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://02dde30aac8f6d27fad1d64a7cac03cf4e5c604ca91f456020aea82d85f77a2a" gracePeriod=600 
Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.764517 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp"]
Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.764643 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp"
Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.765119 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp"
Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.775773 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-6tvkc"]
Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.775926 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-6tvkc"
Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.776573 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-6tvkc"
Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.811413 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj"]
Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.811539 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj"
Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.812019 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj"
Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.857818 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49"]
Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.857976 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49"
Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.858648 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.875162 4967 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-9z6rp_openshift-operators_aec55e5b-3112-46e8-bc8b-c643e8fca0fe_0(ddb0a426282af80be612133312b3e49f5b5823de413e5b1cc94dc88e82fb4308): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.875234 4967 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-9z6rp_openshift-operators_aec55e5b-3112-46e8-bc8b-c643e8fca0fe_0(ddb0a426282af80be612133312b3e49f5b5823de413e5b1cc94dc88e82fb4308): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.875279 4967 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-9z6rp_openshift-operators_aec55e5b-3112-46e8-bc8b-c643e8fca0fe_0(ddb0a426282af80be612133312b3e49f5b5823de413e5b1cc94dc88e82fb4308): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.875360 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-974477bf4-9z6rp_openshift-operators(aec55e5b-3112-46e8-bc8b-c643e8fca0fe)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-974477bf4-9z6rp_openshift-operators(aec55e5b-3112-46e8-bc8b-c643e8fca0fe)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-9z6rp_openshift-operators_aec55e5b-3112-46e8-bc8b-c643e8fca0fe_0(ddb0a426282af80be612133312b3e49f5b5823de413e5b1cc94dc88e82fb4308): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" podUID="aec55e5b-3112-46e8-bc8b-c643e8fca0fe"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.882612 4967 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-6tvkc_openshift-operators_ec826dcc-83f7-4138-b93c-25603f94599a_0(4d17b310a13426a05e839c2d0865d415df1103a62d08cf7b089215ba2afd0ba0): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.882689 4967 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-6tvkc_openshift-operators_ec826dcc-83f7-4138-b93c-25603f94599a_0(4d17b310a13426a05e839c2d0865d415df1103a62d08cf7b089215ba2afd0ba0): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-6tvkc"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.882715 4967 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-6tvkc_openshift-operators_ec826dcc-83f7-4138-b93c-25603f94599a_0(4d17b310a13426a05e839c2d0865d415df1103a62d08cf7b089215ba2afd0ba0): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-6tvkc"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.882768 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-6tvkc_openshift-operators(ec826dcc-83f7-4138-b93c-25603f94599a)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-6tvkc_openshift-operators(ec826dcc-83f7-4138-b93c-25603f94599a)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-6tvkc_openshift-operators_ec826dcc-83f7-4138-b93c-25603f94599a_0(4d17b310a13426a05e839c2d0865d415df1103a62d08cf7b089215ba2afd0ba0): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-6tvkc" podUID="ec826dcc-83f7-4138-b93c-25603f94599a"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.892387 4967 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-xtlzj_openshift-operators_bf8c40bc-4733-49aa-b2e2-9297e0b7bd30_0(d6d715ca812f3c46b8aedeaa17ffe7e1520bdc0b719036927a5cb28248fe65f8): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.892529 4967 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-xtlzj_openshift-operators_bf8c40bc-4733-49aa-b2e2-9297e0b7bd30_0(d6d715ca812f3c46b8aedeaa17ffe7e1520bdc0b719036927a5cb28248fe65f8): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.892557 4967 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-xtlzj_openshift-operators_bf8c40bc-4733-49aa-b2e2-9297e0b7bd30_0(d6d715ca812f3c46b8aedeaa17ffe7e1520bdc0b719036927a5cb28248fe65f8): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.892624 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-974477bf4-xtlzj_openshift-operators(bf8c40bc-4733-49aa-b2e2-9297e0b7bd30)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-974477bf4-xtlzj_openshift-operators(bf8c40bc-4733-49aa-b2e2-9297e0b7bd30)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-xtlzj_openshift-operators_bf8c40bc-4733-49aa-b2e2-9297e0b7bd30_0(d6d715ca812f3c46b8aedeaa17ffe7e1520bdc0b719036927a5cb28248fe65f8): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj" podUID="bf8c40bc-4733-49aa-b2e2-9297e0b7bd30"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.914969 4967 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-wqt49_openshift-operators_f6e9b133-82e0-4185-9fa4-7007ffe75f5d_0(56573bc47549395f16714e6e9ebf75bb8004c07bb0b96db05da2b02af532b27a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.915444 4967 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-wqt49_openshift-operators_f6e9b133-82e0-4185-9fa4-7007ffe75f5d_0(56573bc47549395f16714e6e9ebf75bb8004c07bb0b96db05da2b02af532b27a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.915479 4967 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-wqt49_openshift-operators_f6e9b133-82e0-4185-9fa4-7007ffe75f5d_0(56573bc47549395f16714e6e9ebf75bb8004c07bb0b96db05da2b02af532b27a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.915552 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-wqt49_openshift-operators(f6e9b133-82e0-4185-9fa4-7007ffe75f5d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-wqt49_openshift-operators(f6e9b133-82e0-4185-9fa4-7007ffe75f5d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-wqt49_openshift-operators_f6e9b133-82e0-4185-9fa4-7007ffe75f5d_0(56573bc47549395f16714e6e9ebf75bb8004c07bb0b96db05da2b02af532b27a): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49" podUID="f6e9b133-82e0-4185-9fa4-7007ffe75f5d"
Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.934181 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-pr4jn"]
Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.934342 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn"
Nov 21 15:45:16 crc kubenswrapper[4967]: I1121 15:45:16.935036 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.974103 4967 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pr4jn_openshift-operators_0070e88a-ae9a-4436-ab1f-4e8e4e2ba557_0(25e809423a9ae77415d6ace3ae5b10acae17e973f20ef3407de4ebb053e2b775): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.974200 4967 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pr4jn_openshift-operators_0070e88a-ae9a-4436-ab1f-4e8e4e2ba557_0(25e809423a9ae77415d6ace3ae5b10acae17e973f20ef3407de4ebb053e2b775): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.974231 4967 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pr4jn_openshift-operators_0070e88a-ae9a-4436-ab1f-4e8e4e2ba557_0(25e809423a9ae77415d6ace3ae5b10acae17e973f20ef3407de4ebb053e2b775): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn"
Nov 21 15:45:16 crc kubenswrapper[4967]: E1121 15:45:16.974303 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-pr4jn_openshift-operators(0070e88a-ae9a-4436-ab1f-4e8e4e2ba557)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-pr4jn_openshift-operators(0070e88a-ae9a-4436-ab1f-4e8e4e2ba557)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pr4jn_openshift-operators_0070e88a-ae9a-4436-ab1f-4e8e4e2ba557_0(25e809423a9ae77415d6ace3ae5b10acae17e973f20ef3407de4ebb053e2b775): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" podUID="0070e88a-ae9a-4436-ab1f-4e8e4e2ba557"
Nov 21 15:45:17 crc kubenswrapper[4967]: I1121 15:45:17.260997 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="02dde30aac8f6d27fad1d64a7cac03cf4e5c604ca91f456020aea82d85f77a2a" exitCode=0
Nov 21 15:45:17 crc kubenswrapper[4967]: I1121 15:45:17.261050 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"02dde30aac8f6d27fad1d64a7cac03cf4e5c604ca91f456020aea82d85f77a2a"}
Nov 21 15:45:17 crc kubenswrapper[4967]: I1121 15:45:17.261111 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"1d2361c88740d2f7915ba1040ce6a3af440c8b0cf78d9c3df36a544e55b9adc8"}
Nov 21 15:45:17 crc kubenswrapper[4967]: I1121 15:45:17.261135 4967 scope.go:117] "RemoveContainer" containerID="77fede66988dd8e2022052388678be1ae75dcef265f91ac9300614230678fc4b"
Nov 21 15:45:17 crc kubenswrapper[4967]: I1121 15:45:17.261721 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7"
Nov 21 15:45:22 crc kubenswrapper[4967]: I1121 15:45:22.540790 4967 scope.go:117] "RemoveContainer" containerID="3d70f335c8c42e0b38f593261c45810a3f14e8876d93fdc04908ec56a235c11b"
Nov 21 15:45:22 crc kubenswrapper[4967]: E1121 15:45:22.541631 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-j4dcx_openshift-multus(629a5f41-3cd8-4518-a833-2832f4ebe55a)\"" pod="openshift-multus/multus-j4dcx" podUID="629a5f41-3cd8-4518-a833-2832f4ebe55a"
Nov 21 15:45:28 crc kubenswrapper[4967]: I1121 15:45:28.536027 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49"
Nov 21 15:45:28 crc kubenswrapper[4967]: I1121 15:45:28.536117 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj"
Nov 21 15:45:28 crc kubenswrapper[4967]: I1121 15:45:28.536978 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj"
Nov 21 15:45:28 crc kubenswrapper[4967]: I1121 15:45:28.537072 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49"
Nov 21 15:45:28 crc kubenswrapper[4967]: E1121 15:45:28.619511 4967 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-wqt49_openshift-operators_f6e9b133-82e0-4185-9fa4-7007ffe75f5d_0(8d76ab9563b440976d9a18a22c710b6e55b1c2603a3e421471a3e074891e9d6d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 21 15:45:28 crc kubenswrapper[4967]: E1121 15:45:28.619581 4967 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-wqt49_openshift-operators_f6e9b133-82e0-4185-9fa4-7007ffe75f5d_0(8d76ab9563b440976d9a18a22c710b6e55b1c2603a3e421471a3e074891e9d6d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49" Nov 21 15:45:28 crc kubenswrapper[4967]: E1121 15:45:28.619609 4967 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-wqt49_openshift-operators_f6e9b133-82e0-4185-9fa4-7007ffe75f5d_0(8d76ab9563b440976d9a18a22c710b6e55b1c2603a3e421471a3e074891e9d6d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49" Nov 21 15:45:28 crc kubenswrapper[4967]: E1121 15:45:28.619661 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-wqt49_openshift-operators(f6e9b133-82e0-4185-9fa4-7007ffe75f5d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-wqt49_openshift-operators(f6e9b133-82e0-4185-9fa4-7007ffe75f5d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-wqt49_openshift-operators_f6e9b133-82e0-4185-9fa4-7007ffe75f5d_0(8d76ab9563b440976d9a18a22c710b6e55b1c2603a3e421471a3e074891e9d6d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49" podUID="f6e9b133-82e0-4185-9fa4-7007ffe75f5d" Nov 21 15:45:28 crc kubenswrapper[4967]: E1121 15:45:28.644693 4967 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-xtlzj_openshift-operators_bf8c40bc-4733-49aa-b2e2-9297e0b7bd30_0(a67cf1acb004aea6720846ba7f44deb4b9e8b627f41bba65e46e64515bfc3e19): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 21 15:45:28 crc kubenswrapper[4967]: E1121 15:45:28.644773 4967 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-xtlzj_openshift-operators_bf8c40bc-4733-49aa-b2e2-9297e0b7bd30_0(a67cf1acb004aea6720846ba7f44deb4b9e8b627f41bba65e46e64515bfc3e19): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj" Nov 21 15:45:28 crc kubenswrapper[4967]: E1121 15:45:28.644799 4967 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-xtlzj_openshift-operators_bf8c40bc-4733-49aa-b2e2-9297e0b7bd30_0(a67cf1acb004aea6720846ba7f44deb4b9e8b627f41bba65e46e64515bfc3e19): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj" Nov 21 15:45:28 crc kubenswrapper[4967]: E1121 15:45:28.644861 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-974477bf4-xtlzj_openshift-operators(bf8c40bc-4733-49aa-b2e2-9297e0b7bd30)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-974477bf4-xtlzj_openshift-operators(bf8c40bc-4733-49aa-b2e2-9297e0b7bd30)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-xtlzj_openshift-operators_bf8c40bc-4733-49aa-b2e2-9297e0b7bd30_0(a67cf1acb004aea6720846ba7f44deb4b9e8b627f41bba65e46e64515bfc3e19): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj" podUID="bf8c40bc-4733-49aa-b2e2-9297e0b7bd30" Nov 21 15:45:29 crc kubenswrapper[4967]: I1121 15:45:29.535372 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" Nov 21 15:45:29 crc kubenswrapper[4967]: I1121 15:45:29.535380 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" Nov 21 15:45:29 crc kubenswrapper[4967]: I1121 15:45:29.535882 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" Nov 21 15:45:29 crc kubenswrapper[4967]: I1121 15:45:29.536096 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" Nov 21 15:45:29 crc kubenswrapper[4967]: E1121 15:45:29.568753 4967 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pr4jn_openshift-operators_0070e88a-ae9a-4436-ab1f-4e8e4e2ba557_0(b92846629129dbde0105829cdfd7d2b3a3c4915be7c3d3552a61016173d64500): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 21 15:45:29 crc kubenswrapper[4967]: E1121 15:45:29.568839 4967 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pr4jn_openshift-operators_0070e88a-ae9a-4436-ab1f-4e8e4e2ba557_0(b92846629129dbde0105829cdfd7d2b3a3c4915be7c3d3552a61016173d64500): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" Nov 21 15:45:29 crc kubenswrapper[4967]: E1121 15:45:29.568868 4967 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pr4jn_openshift-operators_0070e88a-ae9a-4436-ab1f-4e8e4e2ba557_0(b92846629129dbde0105829cdfd7d2b3a3c4915be7c3d3552a61016173d64500): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" Nov 21 15:45:29 crc kubenswrapper[4967]: E1121 15:45:29.568954 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-pr4jn_openshift-operators(0070e88a-ae9a-4436-ab1f-4e8e4e2ba557)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-pr4jn_openshift-operators(0070e88a-ae9a-4436-ab1f-4e8e4e2ba557)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-pr4jn_openshift-operators_0070e88a-ae9a-4436-ab1f-4e8e4e2ba557_0(b92846629129dbde0105829cdfd7d2b3a3c4915be7c3d3552a61016173d64500): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" podUID="0070e88a-ae9a-4436-ab1f-4e8e4e2ba557" Nov 21 15:45:29 crc kubenswrapper[4967]: E1121 15:45:29.578082 4967 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-9z6rp_openshift-operators_aec55e5b-3112-46e8-bc8b-c643e8fca0fe_0(c96753eeba3ffbe6531c3b3b77ab2297f82d7ff9a8b50d6bfa27cd19b7e47b6e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 21 15:45:29 crc kubenswrapper[4967]: E1121 15:45:29.578134 4967 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-9z6rp_openshift-operators_aec55e5b-3112-46e8-bc8b-c643e8fca0fe_0(c96753eeba3ffbe6531c3b3b77ab2297f82d7ff9a8b50d6bfa27cd19b7e47b6e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" Nov 21 15:45:29 crc kubenswrapper[4967]: E1121 15:45:29.578159 4967 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-9z6rp_openshift-operators_aec55e5b-3112-46e8-bc8b-c643e8fca0fe_0(c96753eeba3ffbe6531c3b3b77ab2297f82d7ff9a8b50d6bfa27cd19b7e47b6e): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" Nov 21 15:45:29 crc kubenswrapper[4967]: E1121 15:45:29.578201 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-974477bf4-9z6rp_openshift-operators(aec55e5b-3112-46e8-bc8b-c643e8fca0fe)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-974477bf4-9z6rp_openshift-operators(aec55e5b-3112-46e8-bc8b-c643e8fca0fe)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-974477bf4-9z6rp_openshift-operators_aec55e5b-3112-46e8-bc8b-c643e8fca0fe_0(c96753eeba3ffbe6531c3b3b77ab2297f82d7ff9a8b50d6bfa27cd19b7e47b6e): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" podUID="aec55e5b-3112-46e8-bc8b-c643e8fca0fe" Nov 21 15:45:30 crc kubenswrapper[4967]: I1121 15:45:30.535541 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-6tvkc" Nov 21 15:45:30 crc kubenswrapper[4967]: I1121 15:45:30.536242 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-6tvkc" Nov 21 15:45:30 crc kubenswrapper[4967]: E1121 15:45:30.570529 4967 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-6tvkc_openshift-operators_ec826dcc-83f7-4138-b93c-25603f94599a_0(0d1a9d49e29c16436e239578df376e4d39a469b4272bd928c70bde4971b9a3f2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 21 15:45:30 crc kubenswrapper[4967]: E1121 15:45:30.570608 4967 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-6tvkc_openshift-operators_ec826dcc-83f7-4138-b93c-25603f94599a_0(0d1a9d49e29c16436e239578df376e4d39a469b4272bd928c70bde4971b9a3f2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-6tvkc" Nov 21 15:45:30 crc kubenswrapper[4967]: E1121 15:45:30.570637 4967 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-6tvkc_openshift-operators_ec826dcc-83f7-4138-b93c-25603f94599a_0(0d1a9d49e29c16436e239578df376e4d39a469b4272bd928c70bde4971b9a3f2): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-6tvkc" Nov 21 15:45:30 crc kubenswrapper[4967]: E1121 15:45:30.570689 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-6tvkc_openshift-operators(ec826dcc-83f7-4138-b93c-25603f94599a)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-6tvkc_openshift-operators(ec826dcc-83f7-4138-b93c-25603f94599a)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-6tvkc_openshift-operators_ec826dcc-83f7-4138-b93c-25603f94599a_0(0d1a9d49e29c16436e239578df376e4d39a469b4272bd928c70bde4971b9a3f2): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 21 15:45:36 crc kubenswrapper[4967]: I1121 15:45:36.536739 4967 scope.go:117] "RemoveContainer" containerID="3d70f335c8c42e0b38f593261c45810a3f14e8876d93fdc04908ec56a235c11b"
Nov 21 15:45:37 crc kubenswrapper[4967]: I1121 15:45:37.376488 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-j4dcx_629a5f41-3cd8-4518-a833-2832f4ebe55a/kube-multus/2.log"
Nov 21 15:45:37 crc kubenswrapper[4967]: I1121 15:45:37.376831 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-j4dcx" event={"ID":"629a5f41-3cd8-4518-a833-2832f4ebe55a","Type":"ContainerStarted","Data":"a2c9c93db5eb99e835a373795ac160ad10791ac0fb4c043e494e384cc32b0103"}
Nov 21 15:45:39 crc kubenswrapper[4967]: I1121 15:45:39.832518 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-wk7f7"
Nov 21 15:45:41 crc kubenswrapper[4967]: I1121 15:45:41.536165 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj"
Nov 21 15:45:41 crc kubenswrapper[4967]: I1121 15:45:41.536872 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj"
Nov 21 15:45:41 crc kubenswrapper[4967]: I1121 15:45:41.929967 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj"]
Nov 21 15:45:41 crc kubenswrapper[4967]: W1121 15:45:41.934916 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbf8c40bc_4733_49aa_b2e2_9297e0b7bd30.slice/crio-1e2f1531687ab206c00f9506f24a76a5febc2ae8297130bce4ec6976374978c1 WatchSource:0}: Error finding container 1e2f1531687ab206c00f9506f24a76a5febc2ae8297130bce4ec6976374978c1: Status 404 returned error can't find the container with id 1e2f1531687ab206c00f9506f24a76a5febc2ae8297130bce4ec6976374978c1
Nov 21 15:45:42 crc kubenswrapper[4967]: I1121 15:45:42.408467 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj" event={"ID":"bf8c40bc-4733-49aa-b2e2-9297e0b7bd30","Type":"ContainerStarted","Data":"1e2f1531687ab206c00f9506f24a76a5febc2ae8297130bce4ec6976374978c1"}
Nov 21 15:45:42 crc kubenswrapper[4967]: I1121 15:45:42.535728 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49"
Nov 21 15:45:42 crc kubenswrapper[4967]: I1121 15:45:42.540651 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49"
Nov 21 15:45:42 crc kubenswrapper[4967]: I1121 15:45:42.741917 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49"]
Nov 21 15:45:42 crc kubenswrapper[4967]: W1121 15:45:42.749749 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf6e9b133_82e0_4185_9fa4_7007ffe75f5d.slice/crio-3b79e9df85e9c27b1fa5489e6ee606abfa10120113dc481c56fe5bb8ac115d32 WatchSource:0}: Error finding container 3b79e9df85e9c27b1fa5489e6ee606abfa10120113dc481c56fe5bb8ac115d32: Status 404 returned error can't find the container with id 3b79e9df85e9c27b1fa5489e6ee606abfa10120113dc481c56fe5bb8ac115d32
Nov 21 15:45:43 crc kubenswrapper[4967]: I1121 15:45:43.415200 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49" event={"ID":"f6e9b133-82e0-4185-9fa4-7007ffe75f5d","Type":"ContainerStarted","Data":"3b79e9df85e9c27b1fa5489e6ee606abfa10120113dc481c56fe5bb8ac115d32"}
Nov 21 15:45:43 crc kubenswrapper[4967]: I1121 15:45:43.536128 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-6tvkc"
Nov 21 15:45:43 crc kubenswrapper[4967]: I1121 15:45:43.536391 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp"
Nov 21 15:45:43 crc kubenswrapper[4967]: I1121 15:45:43.536403 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn"
Nov 21 15:45:43 crc kubenswrapper[4967]: I1121 15:45:43.537166 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-6tvkc"
Nov 21 15:45:43 crc kubenswrapper[4967]: I1121 15:45:43.537377 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn"
Nov 21 15:45:43 crc kubenswrapper[4967]: I1121 15:45:43.537768 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp"
Nov 21 15:45:43 crc kubenswrapper[4967]: I1121 15:45:43.767627 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-pr4jn"]
Nov 21 15:45:43 crc kubenswrapper[4967]: W1121 15:45:43.775329 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0070e88a_ae9a_4436_ab1f_4e8e4e2ba557.slice/crio-dcbb8863bb66c173d1781a22645a273a0d05afae14decae6825c7631d0876c91 WatchSource:0}: Error finding container dcbb8863bb66c173d1781a22645a273a0d05afae14decae6825c7631d0876c91: Status 404 returned error can't find the container with id dcbb8863bb66c173d1781a22645a273a0d05afae14decae6825c7631d0876c91
Nov 21 15:45:43 crc kubenswrapper[4967]: I1121 15:45:43.831044 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp"]
Nov 21 15:45:43 crc kubenswrapper[4967]: W1121 15:45:43.831522 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaec55e5b_3112_46e8_bc8b_c643e8fca0fe.slice/crio-aebce0ef608dfdb543f57e4e323bfda5aa1467ad14f2540cbf4f4a4a72829ee4 WatchSource:0}: Error finding container aebce0ef608dfdb543f57e4e323bfda5aa1467ad14f2540cbf4f4a4a72829ee4: Status 404 returned error can't find the container with id aebce0ef608dfdb543f57e4e323bfda5aa1467ad14f2540cbf4f4a4a72829ee4
Nov 21 15:45:44 crc kubenswrapper[4967]: I1121 15:45:44.023357 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-6tvkc"]
Nov 21 15:45:44 crc kubenswrapper[4967]: W1121 15:45:44.029802 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podec826dcc_83f7_4138_b93c_25603f94599a.slice/crio-d1f0dcc722bd59ad98860b4e904174107a2ecb162e59f55486099467bca09baa WatchSource:0}: Error finding container d1f0dcc722bd59ad98860b4e904174107a2ecb162e59f55486099467bca09baa: Status 404 returned error can't find the container with id d1f0dcc722bd59ad98860b4e904174107a2ecb162e59f55486099467bca09baa
Nov 21 15:45:44 crc kubenswrapper[4967]: I1121 15:45:44.421862 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-6tvkc" event={"ID":"ec826dcc-83f7-4138-b93c-25603f94599a","Type":"ContainerStarted","Data":"d1f0dcc722bd59ad98860b4e904174107a2ecb162e59f55486099467bca09baa"}
Nov 21 15:45:44 crc kubenswrapper[4967]: I1121 15:45:44.423233 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" event={"ID":"0070e88a-ae9a-4436-ab1f-4e8e4e2ba557","Type":"ContainerStarted","Data":"dcbb8863bb66c173d1781a22645a273a0d05afae14decae6825c7631d0876c91"}
Nov 21 15:45:44 crc kubenswrapper[4967]: I1121 15:45:44.424366 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" event={"ID":"aec55e5b-3112-46e8-bc8b-c643e8fca0fe","Type":"ContainerStarted","Data":"aebce0ef608dfdb543f57e4e323bfda5aa1467ad14f2540cbf4f4a4a72829ee4"}
Nov 21 15:45:54 crc kubenswrapper[4967]: I1121 15:45:54.494010 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj" event={"ID":"bf8c40bc-4733-49aa-b2e2-9297e0b7bd30","Type":"ContainerStarted","Data":"806ce7093b5e81954c43f95b1d26c3419f39c3575017a9469571d6d31b696809"}
Nov 21 15:45:54 crc kubenswrapper[4967]: I1121 15:45:54.495328 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" event={"ID":"0070e88a-ae9a-4436-ab1f-4e8e4e2ba557","Type":"ContainerStarted","Data":"77671fe57c62855024168c54deb590041ebd5c081dd762869430dde57b1a718d"}
Nov 21 15:45:54 crc kubenswrapper[4967]: I1121 15:45:54.495890 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn"
Nov 21 15:45:54 crc kubenswrapper[4967]: I1121 15:45:54.498177 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn"
Nov 21 15:45:54 crc kubenswrapper[4967]: I1121 15:45:54.498732 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49" event={"ID":"f6e9b133-82e0-4185-9fa4-7007ffe75f5d","Type":"ContainerStarted","Data":"1d7577c014de6c8a0a9614736035d9e24157f0f2a25ebbdf22d3f882d396cf81"}
Nov 21 15:45:54 crc kubenswrapper[4967]: I1121 15:45:54.500660 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" event={"ID":"aec55e5b-3112-46e8-bc8b-c643e8fca0fe","Type":"ContainerStarted","Data":"7875acdc44c4d85c5b4df202079c9458d3a5422e07989e2ed2200a8ce39ccf9b"}
Nov 21 15:45:54 crc kubenswrapper[4967]: I1121 15:45:54.502560 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-6tvkc" event={"ID":"ec826dcc-83f7-4138-b93c-25603f94599a","Type":"ContainerStarted","Data":"17d600ca762ef0a910e227654302e76e08e94d413025149d5febb7cc1f0d7dc4"}
Nov 21 15:45:54 crc kubenswrapper[4967]: I1121 15:45:54.503022 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-6tvkc"
Nov 21 15:45:54 crc kubenswrapper[4967]: I1121 15:45:54.524036 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-xtlzj" podStartSLOduration=27.902253268 podStartE2EDuration="39.523966397s" podCreationTimestamp="2025-11-21 15:45:15 +0000 UTC" firstStartedPulling="2025-11-21 15:45:41.937541819 +0000 UTC m=+630.196062827" lastFinishedPulling="2025-11-21 15:45:53.559254948 +0000 UTC m=+641.817775956" observedRunningTime="2025-11-21 15:45:54.520623451 +0000 UTC m=+642.779144479" watchObservedRunningTime="2025-11-21 15:45:54.523966397 +0000 UTC m=+642.782487455"
Nov 21 15:45:54 crc kubenswrapper[4967]: I1121 15:45:54.573995 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-pr4jn" podStartSLOduration=29.749809821 podStartE2EDuration="39.573973667s" podCreationTimestamp="2025-11-21 15:45:15 +0000 UTC" firstStartedPulling="2025-11-21 15:45:43.778962985 +0000 UTC m=+632.037484003" lastFinishedPulling="2025-11-21 15:45:53.603126841 +0000 UTC m=+641.861647849" observedRunningTime="2025-11-21 15:45:54.57166418 +0000 UTC m=+642.830185188" watchObservedRunningTime="2025-11-21 15:45:54.573973667 +0000 UTC m=+642.832494675"
Nov 21 15:45:54 crc kubenswrapper[4967]: I1121 15:45:54.574758 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" podStartSLOduration=29.850990993 podStartE2EDuration="39.574750279s" podCreationTimestamp="2025-11-21 15:45:15 +0000 UTC" firstStartedPulling="2025-11-21 15:45:43.83471107 +0000 UTC m=+632.093232078" lastFinishedPulling="2025-11-21 15:45:53.558470356 +0000 UTC m=+641.816991364" observedRunningTime="2025-11-21 15:45:54.547045252 +0000 UTC m=+642.805566260" watchObservedRunningTime="2025-11-21 15:45:54.574750279 +0000 UTC m=+642.833271287"
pod="openshift-operators/obo-prometheus-operator-admission-webhook-974477bf4-9z6rp" podStartSLOduration=29.850990993 podStartE2EDuration="39.574750279s" podCreationTimestamp="2025-11-21 15:45:15 +0000 UTC" firstStartedPulling="2025-11-21 15:45:43.83471107 +0000 UTC m=+632.093232078" lastFinishedPulling="2025-11-21 15:45:53.558470356 +0000 UTC m=+641.816991364" observedRunningTime="2025-11-21 15:45:54.547045252 +0000 UTC m=+642.805566260" watchObservedRunningTime="2025-11-21 15:45:54.574750279 +0000 UTC m=+642.833271287" Nov 21 15:45:54 crc kubenswrapper[4967]: I1121 15:45:54.596165 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-wqt49" podStartSLOduration=28.790366464 podStartE2EDuration="39.596149705s" podCreationTimestamp="2025-11-21 15:45:15 +0000 UTC" firstStartedPulling="2025-11-21 15:45:42.751675875 +0000 UTC m=+631.010196873" lastFinishedPulling="2025-11-21 15:45:53.557459106 +0000 UTC m=+641.815980114" observedRunningTime="2025-11-21 15:45:54.593992893 +0000 UTC m=+642.852513901" watchObservedRunningTime="2025-11-21 15:45:54.596149705 +0000 UTC m=+642.854670713" Nov 21 15:45:54 crc kubenswrapper[4967]: I1121 15:45:54.620480 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-6tvkc" podStartSLOduration=30.076479244 podStartE2EDuration="39.620466755s" podCreationTimestamp="2025-11-21 15:45:15 +0000 UTC" firstStartedPulling="2025-11-21 15:45:44.032527034 +0000 UTC m=+632.291048042" lastFinishedPulling="2025-11-21 15:45:53.576514545 +0000 UTC m=+641.835035553" observedRunningTime="2025-11-21 15:45:54.617945733 +0000 UTC m=+642.876466741" watchObservedRunningTime="2025-11-21 15:45:54.620466755 +0000 UTC m=+642.878987763" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.498778 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-nv4pt"] Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.500200 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-nv4pt" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.504962 4967 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-ptjsm" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.510214 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.513689 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-nv4pt"] Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.516537 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.526413 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-pwx86"] Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.527390 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-pwx86" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.529213 4967 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-gff7g" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.535323 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-d4z6x"] Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.536391 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-d4z6x" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.538549 4967 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-6x4gx" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.558279 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-pwx86"] Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.564632 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-d4z6x"] Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.588113 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vjqn8\" (UniqueName: \"kubernetes.io/projected/2a0467f9-ffdb-41c1-9bdc-02224075b4f3-kube-api-access-vjqn8\") pod \"cert-manager-cainjector-7f985d654d-nv4pt\" (UID: \"2a0467f9-ffdb-41c1-9bdc-02224075b4f3\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-nv4pt" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.588209 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxjzz\" (UniqueName: \"kubernetes.io/projected/cde8cf1b-1296-410b-82aa-a657c4118292-kube-api-access-bxjzz\") pod \"cert-manager-5b446d88c5-pwx86\" (UID: \"cde8cf1b-1296-410b-82aa-a657c4118292\") " pod="cert-manager/cert-manager-5b446d88c5-pwx86" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.689162 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gr2fz\" (UniqueName: \"kubernetes.io/projected/f3fc7f11-f784-425a-a74a-c31e2be86970-kube-api-access-gr2fz\") pod \"cert-manager-webhook-5655c58dd6-d4z6x\" (UID: \"f3fc7f11-f784-425a-a74a-c31e2be86970\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-d4z6x" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.689303 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxjzz\" (UniqueName: \"kubernetes.io/projected/cde8cf1b-1296-410b-82aa-a657c4118292-kube-api-access-bxjzz\") pod \"cert-manager-5b446d88c5-pwx86\" (UID: \"cde8cf1b-1296-410b-82aa-a657c4118292\") " pod="cert-manager/cert-manager-5b446d88c5-pwx86" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.689401 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vjqn8\" (UniqueName: \"kubernetes.io/projected/2a0467f9-ffdb-41c1-9bdc-02224075b4f3-kube-api-access-vjqn8\") pod \"cert-manager-cainjector-7f985d654d-nv4pt\" (UID: \"2a0467f9-ffdb-41c1-9bdc-02224075b4f3\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-nv4pt" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.710203 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxjzz\" (UniqueName: 
\"kubernetes.io/projected/cde8cf1b-1296-410b-82aa-a657c4118292-kube-api-access-bxjzz\") pod \"cert-manager-5b446d88c5-pwx86\" (UID: \"cde8cf1b-1296-410b-82aa-a657c4118292\") " pod="cert-manager/cert-manager-5b446d88c5-pwx86" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.714232 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vjqn8\" (UniqueName: \"kubernetes.io/projected/2a0467f9-ffdb-41c1-9bdc-02224075b4f3-kube-api-access-vjqn8\") pod \"cert-manager-cainjector-7f985d654d-nv4pt\" (UID: \"2a0467f9-ffdb-41c1-9bdc-02224075b4f3\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-nv4pt" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.790172 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gr2fz\" (UniqueName: \"kubernetes.io/projected/f3fc7f11-f784-425a-a74a-c31e2be86970-kube-api-access-gr2fz\") pod \"cert-manager-webhook-5655c58dd6-d4z6x\" (UID: \"f3fc7f11-f784-425a-a74a-c31e2be86970\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-d4z6x" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.808109 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gr2fz\" (UniqueName: \"kubernetes.io/projected/f3fc7f11-f784-425a-a74a-c31e2be86970-kube-api-access-gr2fz\") pod \"cert-manager-webhook-5655c58dd6-d4z6x\" (UID: \"f3fc7f11-f784-425a-a74a-c31e2be86970\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-d4z6x" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.817151 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-nv4pt" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.849094 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-pwx86" Nov 21 15:46:00 crc kubenswrapper[4967]: I1121 15:46:00.856476 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-d4z6x" Nov 21 15:46:01 crc kubenswrapper[4967]: I1121 15:46:01.087409 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-pwx86"] Nov 21 15:46:01 crc kubenswrapper[4967]: I1121 15:46:01.122759 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-d4z6x"] Nov 21 15:46:01 crc kubenswrapper[4967]: W1121 15:46:01.126082 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3fc7f11_f784_425a_a74a_c31e2be86970.slice/crio-842baf280b2e6603a605dbe001df1ee4338a7c43a243a6429351081bfd6ae878 WatchSource:0}: Error finding container 842baf280b2e6603a605dbe001df1ee4338a7c43a243a6429351081bfd6ae878: Status 404 returned error can't find the container with id 842baf280b2e6603a605dbe001df1ee4338a7c43a243a6429351081bfd6ae878 Nov 21 15:46:01 crc kubenswrapper[4967]: I1121 15:46:01.230002 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-nv4pt"] Nov 21 15:46:01 crc kubenswrapper[4967]: W1121 15:46:01.231810 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a0467f9_ffdb_41c1_9bdc_02224075b4f3.slice/crio-cc4bb67b54f1ddb52d3150b47afd5afcad056744986ecd1866a0aa0d3154074a WatchSource:0}: Error finding container cc4bb67b54f1ddb52d3150b47afd5afcad056744986ecd1866a0aa0d3154074a: Status 404 returned error can't find the container with id cc4bb67b54f1ddb52d3150b47afd5afcad056744986ecd1866a0aa0d3154074a Nov 21 15:46:01 crc kubenswrapper[4967]: I1121 15:46:01.542702 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-d4z6x" event={"ID":"f3fc7f11-f784-425a-a74a-c31e2be86970","Type":"ContainerStarted","Data":"842baf280b2e6603a605dbe001df1ee4338a7c43a243a6429351081bfd6ae878"} Nov 21 15:46:01 crc kubenswrapper[4967]: I1121 15:46:01.543642 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-nv4pt" event={"ID":"2a0467f9-ffdb-41c1-9bdc-02224075b4f3","Type":"ContainerStarted","Data":"cc4bb67b54f1ddb52d3150b47afd5afcad056744986ecd1866a0aa0d3154074a"} Nov 21 15:46:01 crc kubenswrapper[4967]: I1121 15:46:01.545171 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-pwx86" event={"ID":"cde8cf1b-1296-410b-82aa-a657c4118292","Type":"ContainerStarted","Data":"e97107388bac145958cbd87cfad5ea1a303286b5a5b64f35469ef090caea288b"} Nov 21 15:46:05 crc kubenswrapper[4967]: I1121 15:46:05.572113 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-nv4pt" event={"ID":"2a0467f9-ffdb-41c1-9bdc-02224075b4f3","Type":"ContainerStarted","Data":"ed6093f6df3d2e8c1745f717aeab3737340706fe1f8c8282c714587350213ad0"} Nov 21 15:46:05 crc kubenswrapper[4967]: I1121 15:46:05.573714 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-pwx86" event={"ID":"cde8cf1b-1296-410b-82aa-a657c4118292","Type":"ContainerStarted","Data":"f36096fd968a376270546167e90cbb292d816cdd3a4628a3da016bd28cd39b6f"} Nov 21 15:46:05 crc kubenswrapper[4967]: I1121 15:46:05.575486 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-d4z6x" 
event={"ID":"f3fc7f11-f784-425a-a74a-c31e2be86970","Type":"ContainerStarted","Data":"a8dc98dbbeabde2ce2d61997a50d60149967a5aea9e947cc1d1eb0e6c4030597"} Nov 21 15:46:05 crc kubenswrapper[4967]: I1121 15:46:05.575661 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-d4z6x" Nov 21 15:46:05 crc kubenswrapper[4967]: I1121 15:46:05.605010 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-nv4pt" podStartSLOduration=2.2967645230000002 podStartE2EDuration="5.604992609s" podCreationTimestamp="2025-11-21 15:46:00 +0000 UTC" firstStartedPulling="2025-11-21 15:46:01.234069594 +0000 UTC m=+649.492590602" lastFinishedPulling="2025-11-21 15:46:04.54229768 +0000 UTC m=+652.800818688" observedRunningTime="2025-11-21 15:46:05.58796273 +0000 UTC m=+653.846483738" watchObservedRunningTime="2025-11-21 15:46:05.604992609 +0000 UTC m=+653.863513617" Nov 21 15:46:05 crc kubenswrapper[4967]: I1121 15:46:05.606321 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-d4z6x" podStartSLOduration=2.245940142 podStartE2EDuration="5.606298666s" podCreationTimestamp="2025-11-21 15:46:00 +0000 UTC" firstStartedPulling="2025-11-21 15:46:01.127942587 +0000 UTC m=+649.386463595" lastFinishedPulling="2025-11-21 15:46:04.488301111 +0000 UTC m=+652.746822119" observedRunningTime="2025-11-21 15:46:05.602113546 +0000 UTC m=+653.860634554" watchObservedRunningTime="2025-11-21 15:46:05.606298666 +0000 UTC m=+653.864819674" Nov 21 15:46:05 crc kubenswrapper[4967]: I1121 15:46:05.617824 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-pwx86" podStartSLOduration=2.218399488 podStartE2EDuration="5.617804576s" podCreationTimestamp="2025-11-21 15:46:00 +0000 UTC" firstStartedPulling="2025-11-21 15:46:01.088932894 +0000 UTC m=+649.347453902" lastFinishedPulling="2025-11-21 15:46:04.488337982 +0000 UTC m=+652.746858990" observedRunningTime="2025-11-21 15:46:05.617033054 +0000 UTC m=+653.875554062" watchObservedRunningTime="2025-11-21 15:46:05.617804576 +0000 UTC m=+653.876325584" Nov 21 15:46:05 crc kubenswrapper[4967]: I1121 15:46:05.905304 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-6tvkc" Nov 21 15:46:10 crc kubenswrapper[4967]: I1121 15:46:10.860082 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-d4z6x" Nov 21 15:46:37 crc kubenswrapper[4967]: I1121 15:46:37.388074 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf"] Nov 21 15:46:37 crc kubenswrapper[4967]: I1121 15:46:37.390000 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf" Nov 21 15:46:37 crc kubenswrapper[4967]: I1121 15:46:37.391766 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 21 15:46:37 crc kubenswrapper[4967]: I1121 15:46:37.401442 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf"] Nov 21 15:46:37 crc kubenswrapper[4967]: I1121 15:46:37.481940 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0d08e0c8-b3f9-4742-9388-686edea297eb-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf\" (UID: \"0d08e0c8-b3f9-4742-9388-686edea297eb\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf" Nov 21 15:46:37 crc kubenswrapper[4967]: I1121 15:46:37.482004 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fb48m\" (UniqueName: \"kubernetes.io/projected/0d08e0c8-b3f9-4742-9388-686edea297eb-kube-api-access-fb48m\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf\" (UID: \"0d08e0c8-b3f9-4742-9388-686edea297eb\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf" Nov 21 15:46:37 crc kubenswrapper[4967]: I1121 15:46:37.482083 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d08e0c8-b3f9-4742-9388-686edea297eb-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf\" (UID: \"0d08e0c8-b3f9-4742-9388-686edea297eb\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf" Nov 21 15:46:37 crc kubenswrapper[4967]: I1121 15:46:37.583842 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d08e0c8-b3f9-4742-9388-686edea297eb-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf\" (UID: \"0d08e0c8-b3f9-4742-9388-686edea297eb\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf" Nov 21 15:46:37 crc kubenswrapper[4967]: I1121 15:46:37.583910 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0d08e0c8-b3f9-4742-9388-686edea297eb-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf\" (UID: \"0d08e0c8-b3f9-4742-9388-686edea297eb\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf" Nov 21 15:46:37 crc kubenswrapper[4967]: I1121 15:46:37.583974 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fb48m\" (UniqueName: \"kubernetes.io/projected/0d08e0c8-b3f9-4742-9388-686edea297eb-kube-api-access-fb48m\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf\" (UID: \"0d08e0c8-b3f9-4742-9388-686edea297eb\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf" Nov 21 15:46:37 crc kubenswrapper[4967]: I1121 15:46:37.584514 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/0d08e0c8-b3f9-4742-9388-686edea297eb-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf\" (UID: \"0d08e0c8-b3f9-4742-9388-686edea297eb\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf" Nov 21 15:46:37 crc kubenswrapper[4967]: I1121 15:46:37.584514 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d08e0c8-b3f9-4742-9388-686edea297eb-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf\" (UID: \"0d08e0c8-b3f9-4742-9388-686edea297eb\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf" Nov 21 15:46:37 crc kubenswrapper[4967]: I1121 15:46:37.613654 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fb48m\" (UniqueName: \"kubernetes.io/projected/0d08e0c8-b3f9-4742-9388-686edea297eb-kube-api-access-fb48m\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf\" (UID: \"0d08e0c8-b3f9-4742-9388-686edea297eb\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf" Nov 21 15:46:37 crc kubenswrapper[4967]: I1121 15:46:37.707966 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf" Nov 21 15:46:38 crc kubenswrapper[4967]: I1121 15:46:38.122259 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf"] Nov 21 15:46:38 crc kubenswrapper[4967]: I1121 15:46:38.773584 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf" event={"ID":"0d08e0c8-b3f9-4742-9388-686edea297eb","Type":"ContainerStarted","Data":"1bc4b2972e4bfc32927b530b397d52b51b26d3ce896b6ce4570729987d4a49e8"} Nov 21 15:46:38 crc kubenswrapper[4967]: I1121 15:46:38.982090 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk"] Nov 21 15:46:38 crc kubenswrapper[4967]: I1121 15:46:38.983645 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk" Nov 21 15:46:38 crc kubenswrapper[4967]: I1121 15:46:38.991226 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk"] Nov 21 15:46:39 crc kubenswrapper[4967]: I1121 15:46:39.106508 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bbm4\" (UniqueName: \"kubernetes.io/projected/04f688a6-4d0b-49f8-99db-98ecfc140fb9-kube-api-access-2bbm4\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk\" (UID: \"04f688a6-4d0b-49f8-99db-98ecfc140fb9\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk" Nov 21 15:46:39 crc kubenswrapper[4967]: I1121 15:46:39.106569 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04f688a6-4d0b-49f8-99db-98ecfc140fb9-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk\" (UID: \"04f688a6-4d0b-49f8-99db-98ecfc140fb9\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk" Nov 21 15:46:39 crc kubenswrapper[4967]: I1121 15:46:39.106609 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04f688a6-4d0b-49f8-99db-98ecfc140fb9-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk\" (UID: \"04f688a6-4d0b-49f8-99db-98ecfc140fb9\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk" Nov 21 15:46:39 crc kubenswrapper[4967]: I1121 15:46:39.207985 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bbm4\" (UniqueName: \"kubernetes.io/projected/04f688a6-4d0b-49f8-99db-98ecfc140fb9-kube-api-access-2bbm4\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk\" (UID: \"04f688a6-4d0b-49f8-99db-98ecfc140fb9\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk" Nov 21 15:46:39 crc kubenswrapper[4967]: I1121 15:46:39.208509 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04f688a6-4d0b-49f8-99db-98ecfc140fb9-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk\" (UID: \"04f688a6-4d0b-49f8-99db-98ecfc140fb9\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk" Nov 21 15:46:39 crc kubenswrapper[4967]: I1121 15:46:39.208659 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04f688a6-4d0b-49f8-99db-98ecfc140fb9-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk\" (UID: \"04f688a6-4d0b-49f8-99db-98ecfc140fb9\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk" Nov 21 15:46:39 crc kubenswrapper[4967]: I1121 15:46:39.208995 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04f688a6-4d0b-49f8-99db-98ecfc140fb9-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk\" (UID: \"04f688a6-4d0b-49f8-99db-98ecfc140fb9\") " 
pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk" Nov 21 15:46:39 crc kubenswrapper[4967]: I1121 15:46:39.209058 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04f688a6-4d0b-49f8-99db-98ecfc140fb9-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk\" (UID: \"04f688a6-4d0b-49f8-99db-98ecfc140fb9\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk" Nov 21 15:46:39 crc kubenswrapper[4967]: I1121 15:46:39.227949 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bbm4\" (UniqueName: \"kubernetes.io/projected/04f688a6-4d0b-49f8-99db-98ecfc140fb9-kube-api-access-2bbm4\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk\" (UID: \"04f688a6-4d0b-49f8-99db-98ecfc140fb9\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk" Nov 21 15:46:39 crc kubenswrapper[4967]: I1121 15:46:39.298901 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk" Nov 21 15:46:39 crc kubenswrapper[4967]: I1121 15:46:39.714763 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk"] Nov 21 15:46:39 crc kubenswrapper[4967]: W1121 15:46:39.722412 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod04f688a6_4d0b_49f8_99db_98ecfc140fb9.slice/crio-56b8113b52f303542248e857af5b73683ded4f04a83ddb117d124ba5a5aa7bf3 WatchSource:0}: Error finding container 56b8113b52f303542248e857af5b73683ded4f04a83ddb117d124ba5a5aa7bf3: Status 404 returned error can't find the container with id 56b8113b52f303542248e857af5b73683ded4f04a83ddb117d124ba5a5aa7bf3 Nov 21 15:46:39 crc kubenswrapper[4967]: I1121 15:46:39.781726 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk" event={"ID":"04f688a6-4d0b-49f8-99db-98ecfc140fb9","Type":"ContainerStarted","Data":"56b8113b52f303542248e857af5b73683ded4f04a83ddb117d124ba5a5aa7bf3"} Nov 21 15:46:41 crc kubenswrapper[4967]: I1121 15:46:41.793740 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf" event={"ID":"0d08e0c8-b3f9-4742-9388-686edea297eb","Type":"ContainerStarted","Data":"06964f2481f63223ef755ceceae256a02abba2bf44940ff6caff4232e08ec2cb"} Nov 21 15:46:42 crc kubenswrapper[4967]: I1121 15:46:42.801563 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk" event={"ID":"04f688a6-4d0b-49f8-99db-98ecfc140fb9","Type":"ContainerStarted","Data":"9b9c6bb6e25804fad4f56f3338d29b62e726ddcdda4769a1af265c2c16668f6e"} Nov 21 15:46:43 crc kubenswrapper[4967]: I1121 15:46:43.810490 4967 generic.go:334] "Generic (PLEG): container finished" podID="0d08e0c8-b3f9-4742-9388-686edea297eb" containerID="06964f2481f63223ef755ceceae256a02abba2bf44940ff6caff4232e08ec2cb" exitCode=0 Nov 21 15:46:43 crc kubenswrapper[4967]: I1121 15:46:43.810536 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf" 
event={"ID":"0d08e0c8-b3f9-4742-9388-686edea297eb","Type":"ContainerDied","Data":"06964f2481f63223ef755ceceae256a02abba2bf44940ff6caff4232e08ec2cb"} Nov 21 15:46:43 crc kubenswrapper[4967]: I1121 15:46:43.813261 4967 generic.go:334] "Generic (PLEG): container finished" podID="04f688a6-4d0b-49f8-99db-98ecfc140fb9" containerID="9b9c6bb6e25804fad4f56f3338d29b62e726ddcdda4769a1af265c2c16668f6e" exitCode=0 Nov 21 15:46:43 crc kubenswrapper[4967]: I1121 15:46:43.813332 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk" event={"ID":"04f688a6-4d0b-49f8-99db-98ecfc140fb9","Type":"ContainerDied","Data":"9b9c6bb6e25804fad4f56f3338d29b62e726ddcdda4769a1af265c2c16668f6e"} Nov 21 15:46:47 crc kubenswrapper[4967]: I1121 15:46:47.836782 4967 generic.go:334] "Generic (PLEG): container finished" podID="0d08e0c8-b3f9-4742-9388-686edea297eb" containerID="e8b86167fc361e273b3d23f46ecb78aa4b87062b41dd1b4e748811a5935171e6" exitCode=0 Nov 21 15:46:47 crc kubenswrapper[4967]: I1121 15:46:47.836914 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf" event={"ID":"0d08e0c8-b3f9-4742-9388-686edea297eb","Type":"ContainerDied","Data":"e8b86167fc361e273b3d23f46ecb78aa4b87062b41dd1b4e748811a5935171e6"} Nov 21 15:46:48 crc kubenswrapper[4967]: I1121 15:46:48.844819 4967 generic.go:334] "Generic (PLEG): container finished" podID="04f688a6-4d0b-49f8-99db-98ecfc140fb9" containerID="7498ff2e06994ac08122ccf5028fe6c671806682742e18e9565eeb7330946eaa" exitCode=0 Nov 21 15:46:48 crc kubenswrapper[4967]: I1121 15:46:48.844893 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk" event={"ID":"04f688a6-4d0b-49f8-99db-98ecfc140fb9","Type":"ContainerDied","Data":"7498ff2e06994ac08122ccf5028fe6c671806682742e18e9565eeb7330946eaa"} Nov 21 15:46:48 crc kubenswrapper[4967]: I1121 15:46:48.847981 4967 generic.go:334] "Generic (PLEG): container finished" podID="0d08e0c8-b3f9-4742-9388-686edea297eb" containerID="2e6aa7af0a2c65bcd09fb6ec2a9bf8d5e07d4144f188d6206bfbcdc397de2c66" exitCode=0 Nov 21 15:46:48 crc kubenswrapper[4967]: I1121 15:46:48.848022 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf" event={"ID":"0d08e0c8-b3f9-4742-9388-686edea297eb","Type":"ContainerDied","Data":"2e6aa7af0a2c65bcd09fb6ec2a9bf8d5e07d4144f188d6206bfbcdc397de2c66"} Nov 21 15:46:49 crc kubenswrapper[4967]: I1121 15:46:49.855971 4967 generic.go:334] "Generic (PLEG): container finished" podID="04f688a6-4d0b-49f8-99db-98ecfc140fb9" containerID="22dece17c4b0d4d64c03689526067aa7203a149bbc5310d51500281d7a47c408" exitCode=0 Nov 21 15:46:49 crc kubenswrapper[4967]: I1121 15:46:49.856405 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk" event={"ID":"04f688a6-4d0b-49f8-99db-98ecfc140fb9","Type":"ContainerDied","Data":"22dece17c4b0d4d64c03689526067aa7203a149bbc5310d51500281d7a47c408"} Nov 21 15:46:50 crc kubenswrapper[4967]: I1121 15:46:50.116746 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf" Nov 21 15:46:50 crc kubenswrapper[4967]: I1121 15:46:50.170376 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d08e0c8-b3f9-4742-9388-686edea297eb-util\") pod \"0d08e0c8-b3f9-4742-9388-686edea297eb\" (UID: \"0d08e0c8-b3f9-4742-9388-686edea297eb\") " Nov 21 15:46:50 crc kubenswrapper[4967]: I1121 15:46:50.170563 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fb48m\" (UniqueName: \"kubernetes.io/projected/0d08e0c8-b3f9-4742-9388-686edea297eb-kube-api-access-fb48m\") pod \"0d08e0c8-b3f9-4742-9388-686edea297eb\" (UID: \"0d08e0c8-b3f9-4742-9388-686edea297eb\") " Nov 21 15:46:50 crc kubenswrapper[4967]: I1121 15:46:50.170603 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0d08e0c8-b3f9-4742-9388-686edea297eb-bundle\") pod \"0d08e0c8-b3f9-4742-9388-686edea297eb\" (UID: \"0d08e0c8-b3f9-4742-9388-686edea297eb\") " Nov 21 15:46:50 crc kubenswrapper[4967]: I1121 15:46:50.172454 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d08e0c8-b3f9-4742-9388-686edea297eb-bundle" (OuterVolumeSpecName: "bundle") pod "0d08e0c8-b3f9-4742-9388-686edea297eb" (UID: "0d08e0c8-b3f9-4742-9388-686edea297eb"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:46:50 crc kubenswrapper[4967]: I1121 15:46:50.177726 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d08e0c8-b3f9-4742-9388-686edea297eb-kube-api-access-fb48m" (OuterVolumeSpecName: "kube-api-access-fb48m") pod "0d08e0c8-b3f9-4742-9388-686edea297eb" (UID: "0d08e0c8-b3f9-4742-9388-686edea297eb"). InnerVolumeSpecName "kube-api-access-fb48m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:46:50 crc kubenswrapper[4967]: I1121 15:46:50.181480 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d08e0c8-b3f9-4742-9388-686edea297eb-util" (OuterVolumeSpecName: "util") pod "0d08e0c8-b3f9-4742-9388-686edea297eb" (UID: "0d08e0c8-b3f9-4742-9388-686edea297eb"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:46:50 crc kubenswrapper[4967]: I1121 15:46:50.272426 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fb48m\" (UniqueName: \"kubernetes.io/projected/0d08e0c8-b3f9-4742-9388-686edea297eb-kube-api-access-fb48m\") on node \"crc\" DevicePath \"\"" Nov 21 15:46:50 crc kubenswrapper[4967]: I1121 15:46:50.272463 4967 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0d08e0c8-b3f9-4742-9388-686edea297eb-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:46:50 crc kubenswrapper[4967]: I1121 15:46:50.272473 4967 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d08e0c8-b3f9-4742-9388-686edea297eb-util\") on node \"crc\" DevicePath \"\"" Nov 21 15:46:50 crc kubenswrapper[4967]: I1121 15:46:50.866334 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf" event={"ID":"0d08e0c8-b3f9-4742-9388-686edea297eb","Type":"ContainerDied","Data":"1bc4b2972e4bfc32927b530b397d52b51b26d3ce896b6ce4570729987d4a49e8"} Nov 21 15:46:50 crc kubenswrapper[4967]: I1121 15:46:50.866386 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1bc4b2972e4bfc32927b530b397d52b51b26d3ce896b6ce4570729987d4a49e8" Nov 21 15:46:50 crc kubenswrapper[4967]: I1121 15:46:50.866381 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf" Nov 21 15:46:51 crc kubenswrapper[4967]: I1121 15:46:51.079343 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk" Nov 21 15:46:51 crc kubenswrapper[4967]: I1121 15:46:51.188460 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04f688a6-4d0b-49f8-99db-98ecfc140fb9-util\") pod \"04f688a6-4d0b-49f8-99db-98ecfc140fb9\" (UID: \"04f688a6-4d0b-49f8-99db-98ecfc140fb9\") " Nov 21 15:46:51 crc kubenswrapper[4967]: I1121 15:46:51.188645 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bbm4\" (UniqueName: \"kubernetes.io/projected/04f688a6-4d0b-49f8-99db-98ecfc140fb9-kube-api-access-2bbm4\") pod \"04f688a6-4d0b-49f8-99db-98ecfc140fb9\" (UID: \"04f688a6-4d0b-49f8-99db-98ecfc140fb9\") " Nov 21 15:46:51 crc kubenswrapper[4967]: I1121 15:46:51.188830 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04f688a6-4d0b-49f8-99db-98ecfc140fb9-bundle\") pod \"04f688a6-4d0b-49f8-99db-98ecfc140fb9\" (UID: \"04f688a6-4d0b-49f8-99db-98ecfc140fb9\") " Nov 21 15:46:51 crc kubenswrapper[4967]: I1121 15:46:51.189754 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04f688a6-4d0b-49f8-99db-98ecfc140fb9-bundle" (OuterVolumeSpecName: "bundle") pod "04f688a6-4d0b-49f8-99db-98ecfc140fb9" (UID: "04f688a6-4d0b-49f8-99db-98ecfc140fb9"). InnerVolumeSpecName "bundle". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:46:51 crc kubenswrapper[4967]: I1121 15:46:51.196564 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04f688a6-4d0b-49f8-99db-98ecfc140fb9-kube-api-access-2bbm4" (OuterVolumeSpecName: "kube-api-access-2bbm4") pod "04f688a6-4d0b-49f8-99db-98ecfc140fb9" (UID: "04f688a6-4d0b-49f8-99db-98ecfc140fb9"). InnerVolumeSpecName "kube-api-access-2bbm4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:46:51 crc kubenswrapper[4967]: I1121 15:46:51.200185 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04f688a6-4d0b-49f8-99db-98ecfc140fb9-util" (OuterVolumeSpecName: "util") pod "04f688a6-4d0b-49f8-99db-98ecfc140fb9" (UID: "04f688a6-4d0b-49f8-99db-98ecfc140fb9"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:46:51 crc kubenswrapper[4967]: I1121 15:46:51.291188 4967 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04f688a6-4d0b-49f8-99db-98ecfc140fb9-util\") on node \"crc\" DevicePath \"\"" Nov 21 15:46:51 crc kubenswrapper[4967]: I1121 15:46:51.291248 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bbm4\" (UniqueName: \"kubernetes.io/projected/04f688a6-4d0b-49f8-99db-98ecfc140fb9-kube-api-access-2bbm4\") on node \"crc\" DevicePath \"\"" Nov 21 15:46:51 crc kubenswrapper[4967]: I1121 15:46:51.291284 4967 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04f688a6-4d0b-49f8-99db-98ecfc140fb9-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:46:51 crc kubenswrapper[4967]: I1121 15:46:51.874328 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk" event={"ID":"04f688a6-4d0b-49f8-99db-98ecfc140fb9","Type":"ContainerDied","Data":"56b8113b52f303542248e857af5b73683ded4f04a83ddb117d124ba5a5aa7bf3"} Nov 21 15:46:51 crc kubenswrapper[4967]: I1121 15:46:51.874585 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="56b8113b52f303542248e857af5b73683ded4f04a83ddb117d124ba5a5aa7bf3" Nov 21 15:46:51 crc kubenswrapper[4967]: I1121 15:46:51.874374 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.482987 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7"] Nov 21 15:46:57 crc kubenswrapper[4967]: E1121 15:46:57.483618 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d08e0c8-b3f9-4742-9388-686edea297eb" containerName="pull" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.483634 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d08e0c8-b3f9-4742-9388-686edea297eb" containerName="pull" Nov 21 15:46:57 crc kubenswrapper[4967]: E1121 15:46:57.483655 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04f688a6-4d0b-49f8-99db-98ecfc140fb9" containerName="util" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.483663 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="04f688a6-4d0b-49f8-99db-98ecfc140fb9" containerName="util" Nov 21 15:46:57 crc kubenswrapper[4967]: E1121 15:46:57.483675 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04f688a6-4d0b-49f8-99db-98ecfc140fb9" containerName="pull" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.483683 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="04f688a6-4d0b-49f8-99db-98ecfc140fb9" containerName="pull" Nov 21 15:46:57 crc kubenswrapper[4967]: E1121 15:46:57.483692 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d08e0c8-b3f9-4742-9388-686edea297eb" containerName="extract" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.483700 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d08e0c8-b3f9-4742-9388-686edea297eb" containerName="extract" Nov 21 15:46:57 crc kubenswrapper[4967]: E1121 15:46:57.483714 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d08e0c8-b3f9-4742-9388-686edea297eb" containerName="util" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.483721 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d08e0c8-b3f9-4742-9388-686edea297eb" containerName="util" Nov 21 15:46:57 crc kubenswrapper[4967]: E1121 15:46:57.483746 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04f688a6-4d0b-49f8-99db-98ecfc140fb9" containerName="extract" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.483754 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="04f688a6-4d0b-49f8-99db-98ecfc140fb9" containerName="extract" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.483902 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d08e0c8-b3f9-4742-9388-686edea297eb" containerName="extract" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.483921 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="04f688a6-4d0b-49f8-99db-98ecfc140fb9" containerName="extract" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.484849 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.487361 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-dockercfg-lr6q9" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.487453 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"kube-root-ca.crt" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.487559 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"loki-operator-manager-config" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.487827 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-service-cert" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.487926 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"openshift-service-ca.crt" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.489538 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-metrics" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.499693 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7"] Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.571175 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gm4t\" (UniqueName: \"kubernetes.io/projected/cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7-kube-api-access-8gm4t\") pod \"loki-operator-controller-manager-77f9f48c4d-942m7\" (UID: \"cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7\") " pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.572342 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-77f9f48c4d-942m7\" (UID: \"cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7\") " pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.572707 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7-manager-config\") pod \"loki-operator-controller-manager-77f9f48c4d-942m7\" (UID: \"cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7\") " pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.572829 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7-webhook-cert\") pod \"loki-operator-controller-manager-77f9f48c4d-942m7\" (UID: \"cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7\") " pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.573214 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" 
(UniqueName: \"kubernetes.io/secret/cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7-apiservice-cert\") pod \"loki-operator-controller-manager-77f9f48c4d-942m7\" (UID: \"cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7\") " pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.675006 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7-apiservice-cert\") pod \"loki-operator-controller-manager-77f9f48c4d-942m7\" (UID: \"cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7\") " pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.675375 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gm4t\" (UniqueName: \"kubernetes.io/projected/cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7-kube-api-access-8gm4t\") pod \"loki-operator-controller-manager-77f9f48c4d-942m7\" (UID: \"cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7\") " pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.675411 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-77f9f48c4d-942m7\" (UID: \"cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7\") " pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.675434 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7-manager-config\") pod \"loki-operator-controller-manager-77f9f48c4d-942m7\" (UID: \"cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7\") " pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.675460 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7-webhook-cert\") pod \"loki-operator-controller-manager-77f9f48c4d-942m7\" (UID: \"cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7\") " pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.676822 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7-manager-config\") pod \"loki-operator-controller-manager-77f9f48c4d-942m7\" (UID: \"cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7\") " pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.682998 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7-apiservice-cert\") pod \"loki-operator-controller-manager-77f9f48c4d-942m7\" (UID: \"cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7\") " pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.683016 4967 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-77f9f48c4d-942m7\" (UID: \"cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7\") " pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.683238 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7-webhook-cert\") pod \"loki-operator-controller-manager-77f9f48c4d-942m7\" (UID: \"cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7\") " pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.704101 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8gm4t\" (UniqueName: \"kubernetes.io/projected/cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7-kube-api-access-8gm4t\") pod \"loki-operator-controller-manager-77f9f48c4d-942m7\" (UID: \"cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7\") " pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" Nov 21 15:46:57 crc kubenswrapper[4967]: I1121 15:46:57.799997 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" Nov 21 15:46:58 crc kubenswrapper[4967]: I1121 15:46:58.015439 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7"] Nov 21 15:46:58 crc kubenswrapper[4967]: I1121 15:46:58.931615 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" event={"ID":"cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7","Type":"ContainerStarted","Data":"f7af5c2db006dc7a1e0ae98856a89f27b8ba1b595832965e2e73c27b702e36cc"} Nov 21 15:47:03 crc kubenswrapper[4967]: I1121 15:47:03.979044 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" event={"ID":"cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7","Type":"ContainerStarted","Data":"e0ee8e8c6fe2e06cbc79bd30b43b1381c0bac7382432b4b2a0c06d5c598cf692"} Nov 21 15:47:04 crc kubenswrapper[4967]: I1121 15:47:04.008017 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-xrnc9"] Nov 21 15:47:04 crc kubenswrapper[4967]: I1121 15:47:04.009066 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-ff9846bd-xrnc9" Nov 21 15:47:04 crc kubenswrapper[4967]: I1121 15:47:04.010968 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"openshift-service-ca.crt" Nov 21 15:47:04 crc kubenswrapper[4967]: I1121 15:47:04.011211 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"cluster-logging-operator-dockercfg-5zhbl" Nov 21 15:47:04 crc kubenswrapper[4967]: I1121 15:47:04.011538 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"kube-root-ca.crt" Nov 21 15:47:04 crc kubenswrapper[4967]: I1121 15:47:04.021090 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-xrnc9"] Nov 21 15:47:04 crc kubenswrapper[4967]: I1121 15:47:04.096567 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxbtt\" (UniqueName: \"kubernetes.io/projected/4617859e-9a3d-412d-adbe-b229de618303-kube-api-access-dxbtt\") pod \"cluster-logging-operator-ff9846bd-xrnc9\" (UID: \"4617859e-9a3d-412d-adbe-b229de618303\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-xrnc9" Nov 21 15:47:04 crc kubenswrapper[4967]: I1121 15:47:04.197797 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxbtt\" (UniqueName: \"kubernetes.io/projected/4617859e-9a3d-412d-adbe-b229de618303-kube-api-access-dxbtt\") pod \"cluster-logging-operator-ff9846bd-xrnc9\" (UID: \"4617859e-9a3d-412d-adbe-b229de618303\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-xrnc9" Nov 21 15:47:04 crc kubenswrapper[4967]: I1121 15:47:04.220215 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxbtt\" (UniqueName: \"kubernetes.io/projected/4617859e-9a3d-412d-adbe-b229de618303-kube-api-access-dxbtt\") pod \"cluster-logging-operator-ff9846bd-xrnc9\" (UID: \"4617859e-9a3d-412d-adbe-b229de618303\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-xrnc9" Nov 21 15:47:04 crc kubenswrapper[4967]: I1121 15:47:04.328972 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/cluster-logging-operator-ff9846bd-xrnc9" Nov 21 15:47:04 crc kubenswrapper[4967]: I1121 15:47:04.571565 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-xrnc9"] Nov 21 15:47:04 crc kubenswrapper[4967]: I1121 15:47:04.998361 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-ff9846bd-xrnc9" event={"ID":"4617859e-9a3d-412d-adbe-b229de618303","Type":"ContainerStarted","Data":"f76e2117ab1671f58aa1bf25270db9bd4c2c51be3982366fcde71ef4572104aa"} Nov 21 15:47:13 crc kubenswrapper[4967]: I1121 15:47:13.052912 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" event={"ID":"cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7","Type":"ContainerStarted","Data":"a467841ae34c74a734629b4d9973cc6509ba290e1404874ff86f212d99684018"} Nov 21 15:47:13 crc kubenswrapper[4967]: I1121 15:47:13.053479 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" Nov 21 15:47:13 crc kubenswrapper[4967]: I1121 15:47:13.055334 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-ff9846bd-xrnc9" event={"ID":"4617859e-9a3d-412d-adbe-b229de618303","Type":"ContainerStarted","Data":"39003ef21e56056b097e24af091019c6537fdb0848cbc774bee81be4caee6181"} Nov 21 15:47:13 crc kubenswrapper[4967]: I1121 15:47:13.057773 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" Nov 21 15:47:13 crc kubenswrapper[4967]: I1121 15:47:13.081669 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators-redhat/loki-operator-controller-manager-77f9f48c4d-942m7" podStartSLOduration=2.195223692 podStartE2EDuration="16.081652463s" podCreationTimestamp="2025-11-21 15:46:57 +0000 UTC" firstStartedPulling="2025-11-21 15:46:58.028345469 +0000 UTC m=+706.286866477" lastFinishedPulling="2025-11-21 15:47:11.91477424 +0000 UTC m=+720.173295248" observedRunningTime="2025-11-21 15:47:13.078915005 +0000 UTC m=+721.337436013" watchObservedRunningTime="2025-11-21 15:47:13.081652463 +0000 UTC m=+721.340173471" Nov 21 15:47:16 crc kubenswrapper[4967]: I1121 15:47:16.522569 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:47:16 crc kubenswrapper[4967]: I1121 15:47:16.522935 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:47:18 crc kubenswrapper[4967]: I1121 15:47:18.568689 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/cluster-logging-operator-ff9846bd-xrnc9" podStartSLOduration=8.234797074 podStartE2EDuration="15.568655629s" podCreationTimestamp="2025-11-21 15:47:03 +0000 UTC" firstStartedPulling="2025-11-21 15:47:04.582003686 +0000 UTC m=+712.840524704" 
lastFinishedPulling="2025-11-21 15:47:11.915862251 +0000 UTC m=+720.174383259" observedRunningTime="2025-11-21 15:47:13.126130199 +0000 UTC m=+721.384651207" watchObservedRunningTime="2025-11-21 15:47:18.568655629 +0000 UTC m=+726.827176637" Nov 21 15:47:18 crc kubenswrapper[4967]: I1121 15:47:18.569405 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["minio-dev/minio"] Nov 21 15:47:18 crc kubenswrapper[4967]: I1121 15:47:18.570604 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio" Nov 21 15:47:18 crc kubenswrapper[4967]: I1121 15:47:18.573363 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"openshift-service-ca.crt" Nov 21 15:47:18 crc kubenswrapper[4967]: I1121 15:47:18.573437 4967 reflector.go:368] Caches populated for *v1.Secret from object-"minio-dev"/"default-dockercfg-9jtn9" Nov 21 15:47:18 crc kubenswrapper[4967]: I1121 15:47:18.573934 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"kube-root-ca.crt" Nov 21 15:47:18 crc kubenswrapper[4967]: I1121 15:47:18.576263 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"] Nov 21 15:47:18 crc kubenswrapper[4967]: I1121 15:47:18.633636 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-5dff3484-8e78-42b5-8555-3f74e4553fbf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5dff3484-8e78-42b5-8555-3f74e4553fbf\") pod \"minio\" (UID: \"6f738a38-ee0f-43d0-b125-753dd0a6846c\") " pod="minio-dev/minio" Nov 21 15:47:18 crc kubenswrapper[4967]: I1121 15:47:18.634056 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ss7rc\" (UniqueName: \"kubernetes.io/projected/6f738a38-ee0f-43d0-b125-753dd0a6846c-kube-api-access-ss7rc\") pod \"minio\" (UID: \"6f738a38-ee0f-43d0-b125-753dd0a6846c\") " pod="minio-dev/minio" Nov 21 15:47:18 crc kubenswrapper[4967]: I1121 15:47:18.735483 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ss7rc\" (UniqueName: \"kubernetes.io/projected/6f738a38-ee0f-43d0-b125-753dd0a6846c-kube-api-access-ss7rc\") pod \"minio\" (UID: \"6f738a38-ee0f-43d0-b125-753dd0a6846c\") " pod="minio-dev/minio" Nov 21 15:47:18 crc kubenswrapper[4967]: I1121 15:47:18.735614 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-5dff3484-8e78-42b5-8555-3f74e4553fbf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5dff3484-8e78-42b5-8555-3f74e4553fbf\") pod \"minio\" (UID: \"6f738a38-ee0f-43d0-b125-753dd0a6846c\") " pod="minio-dev/minio" Nov 21 15:47:18 crc kubenswrapper[4967]: I1121 15:47:18.739223 4967 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 21 15:47:18 crc kubenswrapper[4967]: I1121 15:47:18.739268 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-5dff3484-8e78-42b5-8555-3f74e4553fbf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5dff3484-8e78-42b5-8555-3f74e4553fbf\") pod \"minio\" (UID: \"6f738a38-ee0f-43d0-b125-753dd0a6846c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/698f204a83299038c477d54c96c8673923d96c2b8a06ec97ee9edb967d7379b4/globalmount\"" pod="minio-dev/minio" Nov 21 15:47:18 crc kubenswrapper[4967]: I1121 15:47:18.759091 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ss7rc\" (UniqueName: \"kubernetes.io/projected/6f738a38-ee0f-43d0-b125-753dd0a6846c-kube-api-access-ss7rc\") pod \"minio\" (UID: \"6f738a38-ee0f-43d0-b125-753dd0a6846c\") " pod="minio-dev/minio" Nov 21 15:47:18 crc kubenswrapper[4967]: I1121 15:47:18.767754 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-5dff3484-8e78-42b5-8555-3f74e4553fbf\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-5dff3484-8e78-42b5-8555-3f74e4553fbf\") pod \"minio\" (UID: \"6f738a38-ee0f-43d0-b125-753dd0a6846c\") " pod="minio-dev/minio" Nov 21 15:47:18 crc kubenswrapper[4967]: I1121 15:47:18.889567 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio" Nov 21 15:47:19 crc kubenswrapper[4967]: I1121 15:47:19.294108 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"] Nov 21 15:47:20 crc kubenswrapper[4967]: I1121 15:47:20.115656 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"6f738a38-ee0f-43d0-b125-753dd0a6846c","Type":"ContainerStarted","Data":"4a2816fd0c98497c84e817b044185c40faab2b1f2b5ff05bb1b8f1da11cb1fd9"} Nov 21 15:47:23 crc kubenswrapper[4967]: I1121 15:47:23.140570 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"6f738a38-ee0f-43d0-b125-753dd0a6846c","Type":"ContainerStarted","Data":"ce9274f57fd77bd7e218f6f5c38db8e87f709ab2410b4a626cce6c694ae86541"} Nov 21 15:47:23 crc kubenswrapper[4967]: I1121 15:47:23.155838 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="minio-dev/minio" podStartSLOduration=5.249050634 podStartE2EDuration="8.15581653s" podCreationTimestamp="2025-11-21 15:47:15 +0000 UTC" firstStartedPulling="2025-11-21 15:47:19.307186695 +0000 UTC m=+727.565707693" lastFinishedPulling="2025-11-21 15:47:22.213952581 +0000 UTC m=+730.472473589" observedRunningTime="2025-11-21 15:47:23.154022359 +0000 UTC m=+731.412543367" watchObservedRunningTime="2025-11-21 15:47:23.15581653 +0000 UTC m=+731.414337558" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.399473 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-w6drw"] Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.400789 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.405001 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-dockercfg-hgngr" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.405664 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-http" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.406488 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-grpc" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.406667 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-config" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.406511 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-ca-bundle" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.422635 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-w6drw"] Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.472824 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/942afa8f-650f-4a9e-b47f-2be4134d16b9-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-w6drw\" (UID: \"942afa8f-650f-4a9e-b47f-2be4134d16b9\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.472888 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jw7nl\" (UniqueName: \"kubernetes.io/projected/942afa8f-650f-4a9e-b47f-2be4134d16b9-kube-api-access-jw7nl\") pod \"logging-loki-distributor-76cc67bf56-w6drw\" (UID: \"942afa8f-650f-4a9e-b47f-2be4134d16b9\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.472927 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/942afa8f-650f-4a9e-b47f-2be4134d16b9-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-w6drw\" (UID: \"942afa8f-650f-4a9e-b47f-2be4134d16b9\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.472962 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/942afa8f-650f-4a9e-b47f-2be4134d16b9-config\") pod \"logging-loki-distributor-76cc67bf56-w6drw\" (UID: \"942afa8f-650f-4a9e-b47f-2be4134d16b9\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.472993 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/942afa8f-650f-4a9e-b47f-2be4134d16b9-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-w6drw\" (UID: \"942afa8f-650f-4a9e-b47f-2be4134d16b9\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.574757 4967 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/942afa8f-650f-4a9e-b47f-2be4134d16b9-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-w6drw\" (UID: \"942afa8f-650f-4a9e-b47f-2be4134d16b9\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.574825 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jw7nl\" (UniqueName: \"kubernetes.io/projected/942afa8f-650f-4a9e-b47f-2be4134d16b9-kube-api-access-jw7nl\") pod \"logging-loki-distributor-76cc67bf56-w6drw\" (UID: \"942afa8f-650f-4a9e-b47f-2be4134d16b9\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.574860 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/942afa8f-650f-4a9e-b47f-2be4134d16b9-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-w6drw\" (UID: \"942afa8f-650f-4a9e-b47f-2be4134d16b9\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.574895 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/942afa8f-650f-4a9e-b47f-2be4134d16b9-config\") pod \"logging-loki-distributor-76cc67bf56-w6drw\" (UID: \"942afa8f-650f-4a9e-b47f-2be4134d16b9\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.574928 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/942afa8f-650f-4a9e-b47f-2be4134d16b9-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-w6drw\" (UID: \"942afa8f-650f-4a9e-b47f-2be4134d16b9\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.576926 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/942afa8f-650f-4a9e-b47f-2be4134d16b9-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-w6drw\" (UID: \"942afa8f-650f-4a9e-b47f-2be4134d16b9\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.577945 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/942afa8f-650f-4a9e-b47f-2be4134d16b9-config\") pod \"logging-loki-distributor-76cc67bf56-w6drw\" (UID: \"942afa8f-650f-4a9e-b47f-2be4134d16b9\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.583650 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/942afa8f-650f-4a9e-b47f-2be4134d16b9-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-w6drw\" (UID: \"942afa8f-650f-4a9e-b47f-2be4134d16b9\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.593096 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-http\" (UniqueName: 
\"kubernetes.io/secret/942afa8f-650f-4a9e-b47f-2be4134d16b9-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-w6drw\" (UID: \"942afa8f-650f-4a9e-b47f-2be4134d16b9\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.597171 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-56lsk"] Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.598165 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.601525 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-s3" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.601767 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-http" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.603794 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jw7nl\" (UniqueName: \"kubernetes.io/projected/942afa8f-650f-4a9e-b47f-2be4134d16b9-kube-api-access-jw7nl\") pod \"logging-loki-distributor-76cc67bf56-w6drw\" (UID: \"942afa8f-650f-4a9e-b47f-2be4134d16b9\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.617687 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-grpc" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.627096 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-56lsk"] Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.678191 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-56lsk\" (UID: \"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.678656 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-56lsk\" (UID: \"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.678691 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3-config\") pod \"logging-loki-querier-5895d59bb8-56lsk\" (UID: \"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.678723 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-56lsk\" (UID: \"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc 
kubenswrapper[4967]: I1121 15:47:27.678758 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdmnd\" (UniqueName: \"kubernetes.io/projected/d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3-kube-api-access-zdmnd\") pod \"logging-loki-querier-5895d59bb8-56lsk\" (UID: \"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.678875 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-56lsk\" (UID: \"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.726700 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.787154 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-56lsk\" (UID: \"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.787242 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-56lsk\" (UID: \"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.787277 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3-config\") pod \"logging-loki-querier-5895d59bb8-56lsk\" (UID: \"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.787324 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-56lsk\" (UID: \"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.787356 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdmnd\" (UniqueName: \"kubernetes.io/projected/d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3-kube-api-access-zdmnd\") pod \"logging-loki-querier-5895d59bb8-56lsk\" (UID: \"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.787440 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-56lsk\" (UID: 
\"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.789257 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3-config\") pod \"logging-loki-querier-5895d59bb8-56lsk\" (UID: \"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.789526 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-56lsk\" (UID: \"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.797270 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-56lsk\" (UID: \"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.810017 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-56lsk\" (UID: \"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.811121 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-56lsk\" (UID: \"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.828346 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdmnd\" (UniqueName: \"kubernetes.io/projected/d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3-kube-api-access-zdmnd\") pod \"logging-loki-querier-5895d59bb8-56lsk\" (UID: \"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.871972 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g"] Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.872872 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.882038 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-grpc" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.882256 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-http" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.892938 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g"] Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.956283 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.992611 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16d794ce-4b6f-4250-835b-28311b905a2c-config\") pod \"logging-loki-query-frontend-84558f7c9f-dsn5g\" (UID: \"16d794ce-4b6f-4250-835b-28311b905a2c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.992659 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/16d794ce-4b6f-4250-835b-28311b905a2c-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-dsn5g\" (UID: \"16d794ce-4b6f-4250-835b-28311b905a2c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.992683 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/16d794ce-4b6f-4250-835b-28311b905a2c-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-dsn5g\" (UID: \"16d794ce-4b6f-4250-835b-28311b905a2c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.992762 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/16d794ce-4b6f-4250-835b-28311b905a2c-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-dsn5g\" (UID: \"16d794ce-4b6f-4250-835b-28311b905a2c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" Nov 21 15:47:27 crc kubenswrapper[4967]: I1121 15:47:27.992805 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9v6g\" (UniqueName: \"kubernetes.io/projected/16d794ce-4b6f-4250-835b-28311b905a2c-kube-api-access-n9v6g\") pod \"logging-loki-query-frontend-84558f7c9f-dsn5g\" (UID: \"16d794ce-4b6f-4250-835b-28311b905a2c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.013901 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-f4f7895cf-4xd89"] Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.016140 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.022789 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.023298 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-dockercfg-8d79x" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.023423 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway-ca-bundle" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.023549 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.023651 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-client-http" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.025499 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-http" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.034875 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-f4f7895cf-4xd89"] Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.040380 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-f4f7895cf-thgv6"] Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.041729 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.069733 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-f4f7895cf-thgv6"] Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.095825 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/8833a69e-7f87-4f56-9610-8dd9cb841732-rbac\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.095880 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/1c995e7a-4ea8-459f-83a9-eede922cb3e3-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.095949 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16d794ce-4b6f-4250-835b-28311b905a2c-config\") pod \"logging-loki-query-frontend-84558f7c9f-dsn5g\" (UID: \"16d794ce-4b6f-4250-835b-28311b905a2c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.095981 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/16d794ce-4b6f-4250-835b-28311b905a2c-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-dsn5g\" (UID: 
\"16d794ce-4b6f-4250-835b-28311b905a2c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.096003 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65fxv\" (UniqueName: \"kubernetes.io/projected/1c995e7a-4ea8-459f-83a9-eede922cb3e3-kube-api-access-65fxv\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.096026 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/16d794ce-4b6f-4250-835b-28311b905a2c-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-dsn5g\" (UID: \"16d794ce-4b6f-4250-835b-28311b905a2c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.096050 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nw8gb\" (UniqueName: \"kubernetes.io/projected/8833a69e-7f87-4f56-9610-8dd9cb841732-kube-api-access-nw8gb\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.096077 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/8833a69e-7f87-4f56-9610-8dd9cb841732-lokistack-gateway\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.096099 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1c995e7a-4ea8-459f-83a9-eede922cb3e3-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.096119 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/8833a69e-7f87-4f56-9610-8dd9cb841732-tls-secret\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.096135 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/1c995e7a-4ea8-459f-83a9-eede922cb3e3-lokistack-gateway\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.096163 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/1c995e7a-4ea8-459f-83a9-eede922cb3e3-tenants\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: 
\"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.096185 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/16d794ce-4b6f-4250-835b-28311b905a2c-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-dsn5g\" (UID: \"16d794ce-4b6f-4250-835b-28311b905a2c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.096208 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1c995e7a-4ea8-459f-83a9-eede922cb3e3-logging-loki-ca-bundle\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.096227 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/8833a69e-7f87-4f56-9610-8dd9cb841732-tenants\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.096247 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8833a69e-7f87-4f56-9610-8dd9cb841732-logging-loki-ca-bundle\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.096262 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8833a69e-7f87-4f56-9610-8dd9cb841732-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.096285 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/1c995e7a-4ea8-459f-83a9-eede922cb3e3-rbac\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.096321 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9v6g\" (UniqueName: \"kubernetes.io/projected/16d794ce-4b6f-4250-835b-28311b905a2c-kube-api-access-n9v6g\") pod \"logging-loki-query-frontend-84558f7c9f-dsn5g\" (UID: \"16d794ce-4b6f-4250-835b-28311b905a2c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.096340 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/1c995e7a-4ea8-459f-83a9-eede922cb3e3-tls-secret\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " 
pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.096362 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/8833a69e-7f87-4f56-9610-8dd9cb841732-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.097984 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16d794ce-4b6f-4250-835b-28311b905a2c-config\") pod \"logging-loki-query-frontend-84558f7c9f-dsn5g\" (UID: \"16d794ce-4b6f-4250-835b-28311b905a2c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.100391 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/16d794ce-4b6f-4250-835b-28311b905a2c-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-dsn5g\" (UID: \"16d794ce-4b6f-4250-835b-28311b905a2c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.106099 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/16d794ce-4b6f-4250-835b-28311b905a2c-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-dsn5g\" (UID: \"16d794ce-4b6f-4250-835b-28311b905a2c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.137513 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/16d794ce-4b6f-4250-835b-28311b905a2c-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-dsn5g\" (UID: \"16d794ce-4b6f-4250-835b-28311b905a2c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.151466 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9v6g\" (UniqueName: \"kubernetes.io/projected/16d794ce-4b6f-4250-835b-28311b905a2c-kube-api-access-n9v6g\") pod \"logging-loki-query-frontend-84558f7c9f-dsn5g\" (UID: \"16d794ce-4b6f-4250-835b-28311b905a2c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.199704 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.200155 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/1c995e7a-4ea8-459f-83a9-eede922cb3e3-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.200258 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65fxv\" (UniqueName: \"kubernetes.io/projected/1c995e7a-4ea8-459f-83a9-eede922cb3e3-kube-api-access-65fxv\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.200284 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nw8gb\" (UniqueName: \"kubernetes.io/projected/8833a69e-7f87-4f56-9610-8dd9cb841732-kube-api-access-nw8gb\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.200350 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/8833a69e-7f87-4f56-9610-8dd9cb841732-lokistack-gateway\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.200380 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1c995e7a-4ea8-459f-83a9-eede922cb3e3-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.200408 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/8833a69e-7f87-4f56-9610-8dd9cb841732-tls-secret\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.200442 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/1c995e7a-4ea8-459f-83a9-eede922cb3e3-lokistack-gateway\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.200481 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/1c995e7a-4ea8-459f-83a9-eede922cb3e3-tenants\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.200515 4967 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1c995e7a-4ea8-459f-83a9-eede922cb3e3-logging-loki-ca-bundle\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.200541 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/8833a69e-7f87-4f56-9610-8dd9cb841732-tenants\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.200570 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8833a69e-7f87-4f56-9610-8dd9cb841732-logging-loki-ca-bundle\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.200594 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8833a69e-7f87-4f56-9610-8dd9cb841732-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.200624 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/1c995e7a-4ea8-459f-83a9-eede922cb3e3-rbac\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.200662 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/1c995e7a-4ea8-459f-83a9-eede922cb3e3-tls-secret\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.200689 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/8833a69e-7f87-4f56-9610-8dd9cb841732-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.200714 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/8833a69e-7f87-4f56-9610-8dd9cb841732-rbac\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.201790 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/8833a69e-7f87-4f56-9610-8dd9cb841732-rbac\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: 
\"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: E1121 15:47:28.202447 4967 configmap.go:193] Couldn't get configMap openshift-logging/logging-loki-gateway-ca-bundle: configmap "logging-loki-gateway-ca-bundle" not found Nov 21 15:47:28 crc kubenswrapper[4967]: E1121 15:47:28.202507 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/8833a69e-7f87-4f56-9610-8dd9cb841732-logging-loki-gateway-ca-bundle podName:8833a69e-7f87-4f56-9610-8dd9cb841732 nodeName:}" failed. No retries permitted until 2025-11-21 15:47:28.702488291 +0000 UTC m=+736.961009299 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "logging-loki-gateway-ca-bundle" (UniqueName: "kubernetes.io/configmap/8833a69e-7f87-4f56-9610-8dd9cb841732-logging-loki-gateway-ca-bundle") pod "logging-loki-gateway-f4f7895cf-4xd89" (UID: "8833a69e-7f87-4f56-9610-8dd9cb841732") : configmap "logging-loki-gateway-ca-bundle" not found Nov 21 15:47:28 crc kubenswrapper[4967]: E1121 15:47:28.202646 4967 secret.go:188] Couldn't get secret openshift-logging/logging-loki-gateway-http: secret "logging-loki-gateway-http" not found Nov 21 15:47:28 crc kubenswrapper[4967]: E1121 15:47:28.202819 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8833a69e-7f87-4f56-9610-8dd9cb841732-tls-secret podName:8833a69e-7f87-4f56-9610-8dd9cb841732 nodeName:}" failed. No retries permitted until 2025-11-21 15:47:28.702740179 +0000 UTC m=+736.961261247 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-secret" (UniqueName: "kubernetes.io/secret/8833a69e-7f87-4f56-9610-8dd9cb841732-tls-secret") pod "logging-loki-gateway-f4f7895cf-4xd89" (UID: "8833a69e-7f87-4f56-9610-8dd9cb841732") : secret "logging-loki-gateway-http" not found Nov 21 15:47:28 crc kubenswrapper[4967]: E1121 15:47:28.203054 4967 secret.go:188] Couldn't get secret openshift-logging/logging-loki-gateway-http: secret "logging-loki-gateway-http" not found Nov 21 15:47:28 crc kubenswrapper[4967]: E1121 15:47:28.203129 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1c995e7a-4ea8-459f-83a9-eede922cb3e3-tls-secret podName:1c995e7a-4ea8-459f-83a9-eede922cb3e3 nodeName:}" failed. No retries permitted until 2025-11-21 15:47:28.703109249 +0000 UTC m=+736.961630317 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-secret" (UniqueName: "kubernetes.io/secret/1c995e7a-4ea8-459f-83a9-eede922cb3e3-tls-secret") pod "logging-loki-gateway-f4f7895cf-thgv6" (UID: "1c995e7a-4ea8-459f-83a9-eede922cb3e3") : secret "logging-loki-gateway-http" not found Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.203949 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1c995e7a-4ea8-459f-83a9-eede922cb3e3-logging-loki-ca-bundle\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.204223 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/1c995e7a-4ea8-459f-83a9-eede922cb3e3-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: E1121 15:47:28.204536 4967 configmap.go:193] Couldn't get configMap openshift-logging/logging-loki-gateway-ca-bundle: configmap "logging-loki-gateway-ca-bundle" not found Nov 21 15:47:28 crc kubenswrapper[4967]: E1121 15:47:28.204630 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1c995e7a-4ea8-459f-83a9-eede922cb3e3-logging-loki-gateway-ca-bundle podName:1c995e7a-4ea8-459f-83a9-eede922cb3e3 nodeName:}" failed. No retries permitted until 2025-11-21 15:47:28.704604522 +0000 UTC m=+736.963125580 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "logging-loki-gateway-ca-bundle" (UniqueName: "kubernetes.io/configmap/1c995e7a-4ea8-459f-83a9-eede922cb3e3-logging-loki-gateway-ca-bundle") pod "logging-loki-gateway-f4f7895cf-thgv6" (UID: "1c995e7a-4ea8-459f-83a9-eede922cb3e3") : configmap "logging-loki-gateway-ca-bundle" not found Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.205244 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8833a69e-7f87-4f56-9610-8dd9cb841732-logging-loki-ca-bundle\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.204503 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/8833a69e-7f87-4f56-9610-8dd9cb841732-lokistack-gateway\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.206171 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/1c995e7a-4ea8-459f-83a9-eede922cb3e3-rbac\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.207443 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/1c995e7a-4ea8-459f-83a9-eede922cb3e3-lokistack-gateway\") pod 
\"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.208242 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/8833a69e-7f87-4f56-9610-8dd9cb841732-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.209247 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/8833a69e-7f87-4f56-9610-8dd9cb841732-tenants\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.221593 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/1c995e7a-4ea8-459f-83a9-eede922cb3e3-tenants\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.223752 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65fxv\" (UniqueName: \"kubernetes.io/projected/1c995e7a-4ea8-459f-83a9-eede922cb3e3-kube-api-access-65fxv\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.227205 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nw8gb\" (UniqueName: \"kubernetes.io/projected/8833a69e-7f87-4f56-9610-8dd9cb841732-kube-api-access-nw8gb\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.336658 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-w6drw"] Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.548049 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-56lsk"] Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.575988 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.577358 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.586663 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-grpc" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.586929 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-http" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.587754 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.667274 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g"] Nov 21 15:47:28 crc kubenswrapper[4967]: W1121 15:47:28.675203 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16d794ce_4b6f_4250_835b_28311b905a2c.slice/crio-433aa81999a5528e2f11b21dcc3bf4f1b58d5757aef1226dbc5797999fcb084c WatchSource:0}: Error finding container 433aa81999a5528e2f11b21dcc3bf4f1b58d5757aef1226dbc5797999fcb084c: Status 404 returned error can't find the container with id 433aa81999a5528e2f11b21dcc3bf4f1b58d5757aef1226dbc5797999fcb084c Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.697488 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.698501 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.701749 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-grpc" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.705786 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-http" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.711230 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4-config\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.711459 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-702dde42-2488-440a-839d-0fecdadc4673\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-702dde42-2488-440a-839d-0fecdadc4673\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.711607 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1c995e7a-4ea8-459f-83a9-eede922cb3e3-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.711722 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-562ff877-c4a1-4f99-8bee-4189fff8428f\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-562ff877-c4a1-4f99-8bee-4189fff8428f\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.711846 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/8833a69e-7f87-4f56-9610-8dd9cb841732-tls-secret\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.712083 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.712228 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.712500 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8833a69e-7f87-4f56-9610-8dd9cb841732-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.712683 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/1c995e7a-4ea8-459f-83a9-eede922cb3e3-tls-secret\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.712721 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.712868 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wf45c\" (UniqueName: \"kubernetes.io/projected/16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4-kube-api-access-wf45c\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.712889 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1c995e7a-4ea8-459f-83a9-eede922cb3e3-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " 
pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.712904 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.713568 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8833a69e-7f87-4f56-9610-8dd9cb841732-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.716123 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.717576 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/8833a69e-7f87-4f56-9610-8dd9cb841732-tls-secret\") pod \"logging-loki-gateway-f4f7895cf-4xd89\" (UID: \"8833a69e-7f87-4f56-9610-8dd9cb841732\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.717726 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/1c995e7a-4ea8-459f-83a9-eede922cb3e3-tls-secret\") pod \"logging-loki-gateway-f4f7895cf-thgv6\" (UID: \"1c995e7a-4ea8-459f-83a9-eede922cb3e3\") " pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.814159 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.814219 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0783f70-8b59-4215-be4a-8ca2c97cc788-config\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.814259 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/e0783f70-8b59-4215-be4a-8ca2c97cc788-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.814292 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wf45c\" (UniqueName: \"kubernetes.io/projected/16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4-kube-api-access-wf45c\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.814399 4967 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.814440 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4-config\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.814462 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwj9h\" (UniqueName: \"kubernetes.io/projected/e0783f70-8b59-4215-be4a-8ca2c97cc788-kube-api-access-lwj9h\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.814495 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/e0783f70-8b59-4215-be4a-8ca2c97cc788-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.814528 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-702dde42-2488-440a-839d-0fecdadc4673\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-702dde42-2488-440a-839d-0fecdadc4673\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.814564 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-562ff877-c4a1-4f99-8bee-4189fff8428f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-562ff877-c4a1-4f99-8bee-4189fff8428f\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.814600 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/e0783f70-8b59-4215-be4a-8ca2c97cc788-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.814628 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.814655 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-ba0fee72-f3b5-4985-b811-1b1ea8a02aa4\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ba0fee72-f3b5-4985-b811-1b1ea8a02aa4\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.814686 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.814749 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e0783f70-8b59-4215-be4a-8ca2c97cc788-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.815535 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4-config\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.815779 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.817259 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.817871 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.818548 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.820352 4967 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.820389 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-562ff877-c4a1-4f99-8bee-4189fff8428f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-562ff877-c4a1-4f99-8bee-4189fff8428f\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/6f39e6917717c74829fd9034505ae1e0a88b1fb992d43f009d45c2bed05e6b72/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.821068 4967 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.821099 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-702dde42-2488-440a-839d-0fecdadc4673\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-702dde42-2488-440a-839d-0fecdadc4673\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/ebf2fb77c10c5583be0256e7080e29866de4eb14c503d0fb1feca3b42c6d1177/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.835668 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wf45c\" (UniqueName: \"kubernetes.io/projected/16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4-kube-api-access-wf45c\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.859528 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-702dde42-2488-440a-839d-0fecdadc4673\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-702dde42-2488-440a-839d-0fecdadc4673\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.860693 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-562ff877-c4a1-4f99-8bee-4189fff8428f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-562ff877-c4a1-4f99-8bee-4189fff8428f\") pod \"logging-loki-ingester-0\" (UID: \"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4\") " pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.900100 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.901126 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.907187 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-grpc" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.907197 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-http" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.920116 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e0783f70-8b59-4215-be4a-8ca2c97cc788-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.920240 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0783f70-8b59-4215-be4a-8ca2c97cc788-config\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.920337 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/e0783f70-8b59-4215-be4a-8ca2c97cc788-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.920413 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwj9h\" (UniqueName: \"kubernetes.io/projected/e0783f70-8b59-4215-be4a-8ca2c97cc788-kube-api-access-lwj9h\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.920458 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/e0783f70-8b59-4215-be4a-8ca2c97cc788-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.920515 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/e0783f70-8b59-4215-be4a-8ca2c97cc788-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.920560 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-ba0fee72-f3b5-4985-b811-1b1ea8a02aa4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ba0fee72-f3b5-4985-b811-1b1ea8a02aa4\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.923929 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e0783f70-8b59-4215-be4a-8ca2c97cc788-logging-loki-ca-bundle\") pod 
\"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.924737 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/e0783f70-8b59-4215-be4a-8ca2c97cc788-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.925559 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0783f70-8b59-4215-be4a-8ca2c97cc788-config\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.926983 4967 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.927064 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-ba0fee72-f3b5-4985-b811-1b1ea8a02aa4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ba0fee72-f3b5-4985-b811-1b1ea8a02aa4\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/57ed71bd3ada59dfe01875f6c454aebf8982b1a61b3d096c33381d000aa13b21/globalmount\"" pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.928605 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/e0783f70-8b59-4215-be4a-8ca2c97cc788-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.932000 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.937217 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.937465 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/e0783f70-8b59-4215-be4a-8ca2c97cc788-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.952144 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwj9h\" (UniqueName: \"kubernetes.io/projected/e0783f70-8b59-4215-be4a-8ca2c97cc788-kube-api-access-lwj9h\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.955449 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-ba0fee72-f3b5-4985-b811-1b1ea8a02aa4\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ba0fee72-f3b5-4985-b811-1b1ea8a02aa4\") pod \"logging-loki-compactor-0\" (UID: \"e0783f70-8b59-4215-be4a-8ca2c97cc788\") " pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.976860 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:28 crc kubenswrapper[4967]: I1121 15:47:28.996734 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.022349 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/ea93b8c8-babe-4417-8741-9ae060295ba0-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.022431 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkcpr\" (UniqueName: \"kubernetes.io/projected/ea93b8c8-babe-4417-8741-9ae060295ba0-kube-api-access-nkcpr\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.022453 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ea93b8c8-babe-4417-8741-9ae060295ba0-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.022499 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-8b4d24b4-0551-4127-b6b9-ca8dc54b999a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8b4d24b4-0551-4127-b6b9-ca8dc54b999a\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.022518 4967 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea93b8c8-babe-4417-8741-9ae060295ba0-config\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.022534 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/ea93b8c8-babe-4417-8741-9ae060295ba0-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.022555 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/ea93b8c8-babe-4417-8741-9ae060295ba0-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.044856 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.124823 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/ea93b8c8-babe-4417-8741-9ae060295ba0-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.124924 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkcpr\" (UniqueName: \"kubernetes.io/projected/ea93b8c8-babe-4417-8741-9ae060295ba0-kube-api-access-nkcpr\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.124968 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ea93b8c8-babe-4417-8741-9ae060295ba0-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.125046 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-8b4d24b4-0551-4127-b6b9-ca8dc54b999a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8b4d24b4-0551-4127-b6b9-ca8dc54b999a\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.125075 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea93b8c8-babe-4417-8741-9ae060295ba0-config\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.125110 4967 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/ea93b8c8-babe-4417-8741-9ae060295ba0-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.125130 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/ea93b8c8-babe-4417-8741-9ae060295ba0-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.126170 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ea93b8c8-babe-4417-8741-9ae060295ba0-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.126177 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ea93b8c8-babe-4417-8741-9ae060295ba0-config\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.126957 4967 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.126978 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-8b4d24b4-0551-4127-b6b9-ca8dc54b999a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8b4d24b4-0551-4127-b6b9-ca8dc54b999a\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/c90250c904cfd7fe993ecc590eae11fdaa63803536690becab890278c7fb4e1d/globalmount\"" pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.130936 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/ea93b8c8-babe-4417-8741-9ae060295ba0-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.132266 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/ea93b8c8-babe-4417-8741-9ae060295ba0-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.132447 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/ea93b8c8-babe-4417-8741-9ae060295ba0-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 
15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.144864 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkcpr\" (UniqueName: \"kubernetes.io/projected/ea93b8c8-babe-4417-8741-9ae060295ba0-kube-api-access-nkcpr\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.156587 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-8b4d24b4-0551-4127-b6b9-ca8dc54b999a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-8b4d24b4-0551-4127-b6b9-ca8dc54b999a\") pod \"logging-loki-index-gateway-0\" (UID: \"ea93b8c8-babe-4417-8741-9ae060295ba0\") " pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.185114 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" event={"ID":"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3","Type":"ContainerStarted","Data":"b23fb7de1f3e51546cea26cee31b0f736744035c8b5001a630b5d03b8b3b0a74"} Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.186289 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" event={"ID":"942afa8f-650f-4a9e-b47f-2be4134d16b9","Type":"ContainerStarted","Data":"201074d6e0cbbceee949e1ba0b7b7e6eac137e0f40d6036cb9608563030e07d8"} Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.189193 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" event={"ID":"16d794ce-4b6f-4250-835b-28311b905a2c","Type":"ContainerStarted","Data":"433aa81999a5528e2f11b21dcc3bf4f1b58d5757aef1226dbc5797999fcb084c"} Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.219434 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.387224 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Nov 21 15:47:29 crc kubenswrapper[4967]: W1121 15:47:29.397004 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode0783f70_8b59_4215_be4a_8ca2c97cc788.slice/crio-db9355d96867e75f1a24ba2ca783da9a50efa096c49176a72ecdadfea67df4bb WatchSource:0}: Error finding container db9355d96867e75f1a24ba2ca783da9a50efa096c49176a72ecdadfea67df4bb: Status 404 returned error can't find the container with id db9355d96867e75f1a24ba2ca783da9a50efa096c49176a72ecdadfea67df4bb Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.464232 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-f4f7895cf-4xd89"] Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.470467 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.479753 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Nov 21 15:47:29 crc kubenswrapper[4967]: W1121 15:47:29.483380 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16e7bcb3_c6a0_440e_a47f_4c3ce1ddd3c4.slice/crio-7e64b42b240613b5d44f8eb5118e4f4f591c5fd7363a6f1fdc5fabdd0b4e5699 WatchSource:0}: Error finding container 7e64b42b240613b5d44f8eb5118e4f4f591c5fd7363a6f1fdc5fabdd0b4e5699: Status 404 returned error can't find the container with id 7e64b42b240613b5d44f8eb5118e4f4f591c5fd7363a6f1fdc5fabdd0b4e5699 Nov 21 15:47:29 crc kubenswrapper[4967]: W1121 15:47:29.485708 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8833a69e_7f87_4f56_9610_8dd9cb841732.slice/crio-2de0cb51a1a8919ebdd1c1c7de931f7d0dbde0ae98b4d16767777f09e5a2fd29 WatchSource:0}: Error finding container 2de0cb51a1a8919ebdd1c1c7de931f7d0dbde0ae98b4d16767777f09e5a2fd29: Status 404 returned error can't find the container with id 2de0cb51a1a8919ebdd1c1c7de931f7d0dbde0ae98b4d16767777f09e5a2fd29 Nov 21 15:47:29 crc kubenswrapper[4967]: W1121 15:47:29.487067 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podea93b8c8_babe_4417_8741_9ae060295ba0.slice/crio-240770483fb3135ca50df77bc197762f1ae7a327b2abe98232c8732e74870c09 WatchSource:0}: Error finding container 240770483fb3135ca50df77bc197762f1ae7a327b2abe98232c8732e74870c09: Status 404 returned error can't find the container with id 240770483fb3135ca50df77bc197762f1ae7a327b2abe98232c8732e74870c09 Nov 21 15:47:29 crc kubenswrapper[4967]: I1121 15:47:29.553223 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-f4f7895cf-thgv6"] Nov 21 15:47:29 crc kubenswrapper[4967]: W1121 15:47:29.558483 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c995e7a_4ea8_459f_83a9_eede922cb3e3.slice/crio-ee594fff64519ccdff9daf6626e6dcce0ae65d2cd1d6bb497b9de11696580acc WatchSource:0}: Error finding container ee594fff64519ccdff9daf6626e6dcce0ae65d2cd1d6bb497b9de11696580acc: Status 404 returned error 
can't find the container with id ee594fff64519ccdff9daf6626e6dcce0ae65d2cd1d6bb497b9de11696580acc Nov 21 15:47:30 crc kubenswrapper[4967]: I1121 15:47:30.215506 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"e0783f70-8b59-4215-be4a-8ca2c97cc788","Type":"ContainerStarted","Data":"db9355d96867e75f1a24ba2ca783da9a50efa096c49176a72ecdadfea67df4bb"} Nov 21 15:47:30 crc kubenswrapper[4967]: I1121 15:47:30.220124 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4","Type":"ContainerStarted","Data":"7e64b42b240613b5d44f8eb5118e4f4f591c5fd7363a6f1fdc5fabdd0b4e5699"} Nov 21 15:47:30 crc kubenswrapper[4967]: I1121 15:47:30.224504 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"ea93b8c8-babe-4417-8741-9ae060295ba0","Type":"ContainerStarted","Data":"240770483fb3135ca50df77bc197762f1ae7a327b2abe98232c8732e74870c09"} Nov 21 15:47:30 crc kubenswrapper[4967]: I1121 15:47:30.225873 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" event={"ID":"1c995e7a-4ea8-459f-83a9-eede922cb3e3","Type":"ContainerStarted","Data":"ee594fff64519ccdff9daf6626e6dcce0ae65d2cd1d6bb497b9de11696580acc"} Nov 21 15:47:30 crc kubenswrapper[4967]: I1121 15:47:30.234788 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" event={"ID":"8833a69e-7f87-4f56-9610-8dd9cb841732","Type":"ContainerStarted","Data":"2de0cb51a1a8919ebdd1c1c7de931f7d0dbde0ae98b4d16767777f09e5a2fd29"} Nov 21 15:47:33 crc kubenswrapper[4967]: I1121 15:47:33.262523 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4","Type":"ContainerStarted","Data":"89ae58bbc89540c4244b8e29ee9621684345e2e6a3eee24acf193d184f03f239"} Nov 21 15:47:33 crc kubenswrapper[4967]: I1121 15:47:33.263087 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:47:33 crc kubenswrapper[4967]: I1121 15:47:33.265637 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" event={"ID":"d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3","Type":"ContainerStarted","Data":"94757a854655e97444cdd718e370dd9bba5e50b88de5b42e361b9d7ccc2c19af"} Nov 21 15:47:33 crc kubenswrapper[4967]: I1121 15:47:33.266995 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"ea93b8c8-babe-4417-8741-9ae060295ba0","Type":"ContainerStarted","Data":"dfc8a22b88cb0f3356f0023bf59c104c3b7029ce8af8584201905df966e091fa"} Nov 21 15:47:33 crc kubenswrapper[4967]: I1121 15:47:33.267109 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:33 crc kubenswrapper[4967]: I1121 15:47:33.283816 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-ingester-0" podStartSLOduration=3.576715581 podStartE2EDuration="6.283794297s" podCreationTimestamp="2025-11-21 15:47:27 +0000 UTC" firstStartedPulling="2025-11-21 15:47:29.485531798 +0000 UTC m=+737.744052806" lastFinishedPulling="2025-11-21 15:47:32.192610524 +0000 UTC m=+740.451131522" observedRunningTime="2025-11-21 
15:47:33.279952957 +0000 UTC m=+741.538473965" watchObservedRunningTime="2025-11-21 15:47:33.283794297 +0000 UTC m=+741.542315305" Nov 21 15:47:33 crc kubenswrapper[4967]: I1121 15:47:33.299060 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-index-gateway-0" podStartSLOduration=3.575226658 podStartE2EDuration="6.299036484s" podCreationTimestamp="2025-11-21 15:47:27 +0000 UTC" firstStartedPulling="2025-11-21 15:47:29.49049257 +0000 UTC m=+737.749013578" lastFinishedPulling="2025-11-21 15:47:32.214302396 +0000 UTC m=+740.472823404" observedRunningTime="2025-11-21 15:47:33.295001619 +0000 UTC m=+741.553522627" watchObservedRunningTime="2025-11-21 15:47:33.299036484 +0000 UTC m=+741.557557492" Nov 21 15:47:34 crc kubenswrapper[4967]: I1121 15:47:34.297032 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" podStartSLOduration=3.669525472 podStartE2EDuration="7.297013032s" podCreationTimestamp="2025-11-21 15:47:27 +0000 UTC" firstStartedPulling="2025-11-21 15:47:28.564575958 +0000 UTC m=+736.823096966" lastFinishedPulling="2025-11-21 15:47:32.192063518 +0000 UTC m=+740.450584526" observedRunningTime="2025-11-21 15:47:34.293763079 +0000 UTC m=+742.552284107" watchObservedRunningTime="2025-11-21 15:47:34.297013032 +0000 UTC m=+742.555534060" Nov 21 15:47:35 crc kubenswrapper[4967]: I1121 15:47:35.279650 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" event={"ID":"16d794ce-4b6f-4250-835b-28311b905a2c","Type":"ContainerStarted","Data":"1fa6c47477868f4e32011a92e1daea81aa98bfe6218f1ecae33a20bc7312572a"} Nov 21 15:47:35 crc kubenswrapper[4967]: I1121 15:47:35.281073 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" Nov 21 15:47:35 crc kubenswrapper[4967]: I1121 15:47:35.283140 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" event={"ID":"1c995e7a-4ea8-459f-83a9-eede922cb3e3","Type":"ContainerStarted","Data":"a7290bef9ebac596fd64b1b37fe6157849df365a6d394cbeac0c1a7ed89d7b45"} Nov 21 15:47:35 crc kubenswrapper[4967]: I1121 15:47:35.284771 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" event={"ID":"8833a69e-7f87-4f56-9610-8dd9cb841732","Type":"ContainerStarted","Data":"e3213d8c7010cd18d1866adc31be2124420d07ad1f932fa5358222ef34bcddeb"} Nov 21 15:47:35 crc kubenswrapper[4967]: I1121 15:47:35.286232 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"e0783f70-8b59-4215-be4a-8ca2c97cc788","Type":"ContainerStarted","Data":"4368f4f7eaefffb8e9871671ccf30635e6b5db308d0a2d7840c91994199849f3"} Nov 21 15:47:35 crc kubenswrapper[4967]: I1121 15:47:35.286355 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:35 crc kubenswrapper[4967]: I1121 15:47:35.287577 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" event={"ID":"942afa8f-650f-4a9e-b47f-2be4134d16b9","Type":"ContainerStarted","Data":"c077bd5b0c36b21571273ce9ef739622055058586885488386c490d676bc30b8"} Nov 21 15:47:35 crc kubenswrapper[4967]: I1121 15:47:35.287714 4967 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" Nov 21 15:47:35 crc kubenswrapper[4967]: I1121 15:47:35.298694 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" podStartSLOduration=2.86315613 podStartE2EDuration="8.298677747s" podCreationTimestamp="2025-11-21 15:47:27 +0000 UTC" firstStartedPulling="2025-11-21 15:47:28.677480647 +0000 UTC m=+736.936001655" lastFinishedPulling="2025-11-21 15:47:34.113002264 +0000 UTC m=+742.371523272" observedRunningTime="2025-11-21 15:47:35.294162537 +0000 UTC m=+743.552683565" watchObservedRunningTime="2025-11-21 15:47:35.298677747 +0000 UTC m=+743.557198755" Nov 21 15:47:35 crc kubenswrapper[4967]: I1121 15:47:35.312830 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" podStartSLOduration=2.619660085 podStartE2EDuration="8.312811902s" podCreationTimestamp="2025-11-21 15:47:27 +0000 UTC" firstStartedPulling="2025-11-21 15:47:28.35478791 +0000 UTC m=+736.613308918" lastFinishedPulling="2025-11-21 15:47:34.047939737 +0000 UTC m=+742.306460735" observedRunningTime="2025-11-21 15:47:35.308509599 +0000 UTC m=+743.567030607" watchObservedRunningTime="2025-11-21 15:47:35.312811902 +0000 UTC m=+743.571332920" Nov 21 15:47:35 crc kubenswrapper[4967]: I1121 15:47:35.337767 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-compactor-0" podStartSLOduration=3.482491457 podStartE2EDuration="8.337746438s" podCreationTimestamp="2025-11-21 15:47:27 +0000 UTC" firstStartedPulling="2025-11-21 15:47:29.403835204 +0000 UTC m=+737.662356212" lastFinishedPulling="2025-11-21 15:47:34.259090185 +0000 UTC m=+742.517611193" observedRunningTime="2025-11-21 15:47:35.330949733 +0000 UTC m=+743.589470741" watchObservedRunningTime="2025-11-21 15:47:35.337746438 +0000 UTC m=+743.596267456" Nov 21 15:47:36 crc kubenswrapper[4967]: I1121 15:47:36.297649 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" event={"ID":"1c995e7a-4ea8-459f-83a9-eede922cb3e3","Type":"ContainerStarted","Data":"85fea6a13c6bdc71d78c61f3ca27fcb9182630e51caa91c19fa59e9e3654fa95"} Nov 21 15:47:36 crc kubenswrapper[4967]: I1121 15:47:36.298009 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:36 crc kubenswrapper[4967]: I1121 15:47:36.299535 4967 patch_prober.go:28] interesting pod/logging-loki-gateway-f4f7895cf-thgv6 container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.75:8083/ready\": dial tcp 10.217.0.75:8083: connect: connection refused" start-of-body= Nov 21 15:47:36 crc kubenswrapper[4967]: I1121 15:47:36.300409 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" podUID="1c995e7a-4ea8-459f-83a9-eede922cb3e3" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.75:8083/ready\": dial tcp 10.217.0.75:8083: connect: connection refused" Nov 21 15:47:36 crc kubenswrapper[4967]: I1121 15:47:36.300957 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" 
event={"ID":"8833a69e-7f87-4f56-9610-8dd9cb841732","Type":"ContainerStarted","Data":"4c072d429a5953a4cbe84dfa092ca0d9da5ff456090499f5d3de256a6df9c485"} Nov 21 15:47:36 crc kubenswrapper[4967]: I1121 15:47:36.301333 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:36 crc kubenswrapper[4967]: I1121 15:47:36.301545 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:36 crc kubenswrapper[4967]: I1121 15:47:36.303430 4967 patch_prober.go:28] interesting pod/logging-loki-gateway-f4f7895cf-4xd89 container/opa namespace/openshift-logging: Readiness probe status=failure output="Get \"https://10.217.0.74:8083/ready\": dial tcp 10.217.0.74:8083: connect: connection refused" start-of-body= Nov 21 15:47:36 crc kubenswrapper[4967]: I1121 15:47:36.303471 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" podUID="8833a69e-7f87-4f56-9610-8dd9cb841732" containerName="opa" probeResult="failure" output="Get \"https://10.217.0.74:8083/ready\": dial tcp 10.217.0.74:8083: connect: connection refused" Nov 21 15:47:36 crc kubenswrapper[4967]: I1121 15:47:36.318291 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" podStartSLOduration=1.7398827639999999 podStartE2EDuration="8.318269906s" podCreationTimestamp="2025-11-21 15:47:28 +0000 UTC" firstStartedPulling="2025-11-21 15:47:29.560982852 +0000 UTC m=+737.819503860" lastFinishedPulling="2025-11-21 15:47:36.139369994 +0000 UTC m=+744.397891002" observedRunningTime="2025-11-21 15:47:36.314744335 +0000 UTC m=+744.573265343" watchObservedRunningTime="2025-11-21 15:47:36.318269906 +0000 UTC m=+744.576790914" Nov 21 15:47:36 crc kubenswrapper[4967]: I1121 15:47:36.318674 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:36 crc kubenswrapper[4967]: I1121 15:47:36.337036 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" podStartSLOduration=2.6942878070000003 podStartE2EDuration="9.337016854s" podCreationTimestamp="2025-11-21 15:47:27 +0000 UTC" firstStartedPulling="2025-11-21 15:47:29.489946045 +0000 UTC m=+737.748467053" lastFinishedPulling="2025-11-21 15:47:36.132675092 +0000 UTC m=+744.391196100" observedRunningTime="2025-11-21 15:47:36.33480167 +0000 UTC m=+744.593322668" watchObservedRunningTime="2025-11-21 15:47:36.337016854 +0000 UTC m=+744.595537862" Nov 21 15:47:37 crc kubenswrapper[4967]: I1121 15:47:37.307469 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:37 crc kubenswrapper[4967]: I1121 15:47:37.313063 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:37 crc kubenswrapper[4967]: I1121 15:47:37.313796 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-f4f7895cf-4xd89" Nov 21 15:47:37 crc kubenswrapper[4967]: I1121 15:47:37.320961 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-f4f7895cf-thgv6" Nov 21 15:47:37 crc 
Nov 21 15:47:37 crc kubenswrapper[4967]: I1121 15:47:37.957232 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk"
Nov 21 15:47:40 crc kubenswrapper[4967]: I1121 15:47:40.055339 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4wxnm"]
Nov 21 15:47:40 crc kubenswrapper[4967]: I1121 15:47:40.055873 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" podUID="1def5571-fff5-47d8-b9bd-13ee21c73760" containerName="controller-manager" containerID="cri-o://4c95e49451563c84ef0984e57a9359488f85dfd2faeedd8070417de4f98e2c5a" gracePeriod=30
Nov 21 15:47:40 crc kubenswrapper[4967]: I1121 15:47:40.173622 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l"]
Nov 21 15:47:40 crc kubenswrapper[4967]: I1121 15:47:40.173955 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" podUID="14b6d0fb-f8e7-4ed0-81e6-7aaacb172807" containerName="route-controller-manager" containerID="cri-o://d97ba222b2d09fcffb95a8b1570d1b6715a8a80dc93a3cab2c934684e55889ce" gracePeriod=30
Nov 21 15:47:40 crc kubenswrapper[4967]: I1121 15:47:40.330107 4967 generic.go:334] "Generic (PLEG): container finished" podID="14b6d0fb-f8e7-4ed0-81e6-7aaacb172807" containerID="d97ba222b2d09fcffb95a8b1570d1b6715a8a80dc93a3cab2c934684e55889ce" exitCode=0
Nov 21 15:47:40 crc kubenswrapper[4967]: I1121 15:47:40.330187 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" event={"ID":"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807","Type":"ContainerDied","Data":"d97ba222b2d09fcffb95a8b1570d1b6715a8a80dc93a3cab2c934684e55889ce"}
Nov 21 15:47:40 crc kubenswrapper[4967]: I1121 15:47:40.332421 4967 generic.go:334] "Generic (PLEG): container finished" podID="1def5571-fff5-47d8-b9bd-13ee21c73760" containerID="4c95e49451563c84ef0984e57a9359488f85dfd2faeedd8070417de4f98e2c5a" exitCode=0
Nov 21 15:47:40 crc kubenswrapper[4967]: I1121 15:47:40.332446 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" event={"ID":"1def5571-fff5-47d8-b9bd-13ee21c73760","Type":"ContainerDied","Data":"4c95e49451563c84ef0984e57a9359488f85dfd2faeedd8070417de4f98e2c5a"}
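
The deletion sequence above shows the grace-period contract: the API DELETE triggers "Killing container with a grace period" with gracePeriod=30, the runtime delivers the container's stop signal (typically SIGTERM) and would escalate to SIGKILL only after the grace period expires, and both controller-manager containers exit cleanly (exitCode=0) well inside the deadline. A minimal Go sketch of the workload side of that contract, assuming SIGTERM as the stop signal (an illustration, not the managers' actual code):

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Ask the runtime to deliver SIGTERM here instead of killing us outright.
	term := make(chan os.Signal, 1)
	signal.Notify(term, syscall.SIGTERM)

	<-term // the runtime's StopContainer path begins with this signal
	fmt.Println("draining work and exiting before the 30s deadline")
	os.Exit(0) // clean exit; the SIGKILL escalation never fires
}
```
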
Nov 21 15:47:40 crc kubenswrapper[4967]: I1121 15:47:40.746638 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l"
Nov 21 15:47:40 crc kubenswrapper[4967]: I1121 15:47:40.933931 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-client-ca\") pod \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\" (UID: \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\") "
Nov 21 15:47:40 crc kubenswrapper[4967]: I1121 15:47:40.934002 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vb2fv\" (UniqueName: \"kubernetes.io/projected/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-kube-api-access-vb2fv\") pod \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\" (UID: \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\") "
Nov 21 15:47:40 crc kubenswrapper[4967]: I1121 15:47:40.934036 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-serving-cert\") pod \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\" (UID: \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\") "
Nov 21 15:47:40 crc kubenswrapper[4967]: I1121 15:47:40.934085 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-config\") pod \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\" (UID: \"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807\") "
Nov 21 15:47:40 crc kubenswrapper[4967]: I1121 15:47:40.934765 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-client-ca" (OuterVolumeSpecName: "client-ca") pod "14b6d0fb-f8e7-4ed0-81e6-7aaacb172807" (UID: "14b6d0fb-f8e7-4ed0-81e6-7aaacb172807"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:47:40 crc kubenswrapper[4967]: I1121 15:47:40.934884 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-config" (OuterVolumeSpecName: "config") pod "14b6d0fb-f8e7-4ed0-81e6-7aaacb172807" (UID: "14b6d0fb-f8e7-4ed0-81e6-7aaacb172807"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:47:40 crc kubenswrapper[4967]: I1121 15:47:40.942712 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "14b6d0fb-f8e7-4ed0-81e6-7aaacb172807" (UID: "14b6d0fb-f8e7-4ed0-81e6-7aaacb172807"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:47:40 crc kubenswrapper[4967]: I1121 15:47:40.942720 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-kube-api-access-vb2fv" (OuterVolumeSpecName: "kube-api-access-vb2fv") pod "14b6d0fb-f8e7-4ed0-81e6-7aaacb172807" (UID: "14b6d0fb-f8e7-4ed0-81e6-7aaacb172807"). InnerVolumeSpecName "kube-api-access-vb2fv". PluginName "kubernetes.io/projected", VolumeGidValue ""
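
The teardown entries above follow the volume reconciler's fixed order: UnmountVolume is started for each of the pod's volumes, the per-plugin TearDown runs, and only then is each volume reported detached (next entries) and the pod's directory left for the orphan-cleanup pass logged later ("Cleaned up orphaned pod volumes dir"). The directories involved follow the kubelet's on-disk layout, /var/lib/kubelet/pods/<podUID>/volumes/<plugin>/<volume>. A minimal sketch of that path construction, with the UID and volume names taken from the route-controller-manager entries above:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// volumeDir builds the kubelet pod-volume directory that TearDown operates on.
func volumeDir(podUID, plugin, volume string) string {
	return filepath.Join("/var/lib/kubelet/pods", podUID, "volumes", plugin, volume)
}

func main() {
	uid := "14b6d0fb-f8e7-4ed0-81e6-7aaacb172807"
	fmt.Println(volumeDir(uid, "kubernetes.io~configmap", "client-ca"))
	fmt.Println(volumeDir(uid, "kubernetes.io~secret", "serving-cert"))
	fmt.Println(volumeDir(uid, "kubernetes.io~projected", "kube-api-access-vb2fv"))
}
```
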
Nov 21 15:47:40 crc kubenswrapper[4967]: I1121 15:47:40.979933 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm"
Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.036304 4967 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.036364 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-config\") on node \"crc\" DevicePath \"\""
Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.036378 4967 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-client-ca\") on node \"crc\" DevicePath \"\""
Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.036403 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vb2fv\" (UniqueName: \"kubernetes.io/projected/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807-kube-api-access-vb2fv\") on node \"crc\" DevicePath \"\""
Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.137824 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1def5571-fff5-47d8-b9bd-13ee21c73760-proxy-ca-bundles\") pod \"1def5571-fff5-47d8-b9bd-13ee21c73760\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") "
Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.137889 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8bzl\" (UniqueName: \"kubernetes.io/projected/1def5571-fff5-47d8-b9bd-13ee21c73760-kube-api-access-j8bzl\") pod \"1def5571-fff5-47d8-b9bd-13ee21c73760\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") "
Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.137947 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1def5571-fff5-47d8-b9bd-13ee21c73760-client-ca\") pod \"1def5571-fff5-47d8-b9bd-13ee21c73760\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") "
Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.138008 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1def5571-fff5-47d8-b9bd-13ee21c73760-serving-cert\") pod \"1def5571-fff5-47d8-b9bd-13ee21c73760\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") "
Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.138046 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1def5571-fff5-47d8-b9bd-13ee21c73760-config\") pod \"1def5571-fff5-47d8-b9bd-13ee21c73760\" (UID: \"1def5571-fff5-47d8-b9bd-13ee21c73760\") "
Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.138729 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1def5571-fff5-47d8-b9bd-13ee21c73760-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "1def5571-fff5-47d8-b9bd-13ee21c73760" (UID: "1def5571-fff5-47d8-b9bd-13ee21c73760"). InnerVolumeSpecName "proxy-ca-bundles".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.138741 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1def5571-fff5-47d8-b9bd-13ee21c73760-client-ca" (OuterVolumeSpecName: "client-ca") pod "1def5571-fff5-47d8-b9bd-13ee21c73760" (UID: "1def5571-fff5-47d8-b9bd-13ee21c73760"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.138904 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1def5571-fff5-47d8-b9bd-13ee21c73760-config" (OuterVolumeSpecName: "config") pod "1def5571-fff5-47d8-b9bd-13ee21c73760" (UID: "1def5571-fff5-47d8-b9bd-13ee21c73760"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.139147 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1def5571-fff5-47d8-b9bd-13ee21c73760-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.139160 4967 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/1def5571-fff5-47d8-b9bd-13ee21c73760-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.139170 4967 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1def5571-fff5-47d8-b9bd-13ee21c73760-client-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.141822 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1def5571-fff5-47d8-b9bd-13ee21c73760-kube-api-access-j8bzl" (OuterVolumeSpecName: "kube-api-access-j8bzl") pod "1def5571-fff5-47d8-b9bd-13ee21c73760" (UID: "1def5571-fff5-47d8-b9bd-13ee21c73760"). InnerVolumeSpecName "kube-api-access-j8bzl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.141818 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1def5571-fff5-47d8-b9bd-13ee21c73760-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1def5571-fff5-47d8-b9bd-13ee21c73760" (UID: "1def5571-fff5-47d8-b9bd-13ee21c73760"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.240608 4967 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1def5571-fff5-47d8-b9bd-13ee21c73760-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.240644 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8bzl\" (UniqueName: \"kubernetes.io/projected/1def5571-fff5-47d8-b9bd-13ee21c73760-kube-api-access-j8bzl\") on node \"crc\" DevicePath \"\"" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.340909 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" event={"ID":"1def5571-fff5-47d8-b9bd-13ee21c73760","Type":"ContainerDied","Data":"68e789e782defb92a21d8a3484d33dbefe1ab20515554d088d63ec798724e58b"} Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.340958 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-4wxnm" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.340977 4967 scope.go:117] "RemoveContainer" containerID="4c95e49451563c84ef0984e57a9359488f85dfd2faeedd8070417de4f98e2c5a" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.342873 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" event={"ID":"14b6d0fb-f8e7-4ed0-81e6-7aaacb172807","Type":"ContainerDied","Data":"1d69dc902918170b08c111beb5f0c739d5a0d7c1f33c567a550a67ac6f1ac57c"} Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.342997 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.359544 4967 scope.go:117] "RemoveContainer" containerID="d97ba222b2d09fcffb95a8b1570d1b6715a8a80dc93a3cab2c934684e55889ce" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.377185 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4wxnm"] Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.386272 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4wxnm"] Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.393921 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l"] Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.398443 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-v9g6l"] Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.572588 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5d795f4d86-64xs7"] Nov 21 15:47:41 crc kubenswrapper[4967]: E1121 15:47:41.572967 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1def5571-fff5-47d8-b9bd-13ee21c73760" containerName="controller-manager" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.572996 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="1def5571-fff5-47d8-b9bd-13ee21c73760" containerName="controller-manager" Nov 21 15:47:41 crc kubenswrapper[4967]: E1121 15:47:41.573017 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14b6d0fb-f8e7-4ed0-81e6-7aaacb172807" containerName="route-controller-manager" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.573026 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="14b6d0fb-f8e7-4ed0-81e6-7aaacb172807" containerName="route-controller-manager" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.573220 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="1def5571-fff5-47d8-b9bd-13ee21c73760" containerName="controller-manager" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.573251 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="14b6d0fb-f8e7-4ed0-81e6-7aaacb172807" containerName="route-controller-manager" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.573890 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.575988 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.576134 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.576359 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.576367 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.577509 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.580893 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.590717 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.631396 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx"] Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.632583 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.635371 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.635538 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.636152 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5d795f4d86-64xs7"] Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.636374 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.636554 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.638549 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.639064 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.640026 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx"] Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.746226 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/4c3477d6-9324-44a1-8207-b4587c46eb02-client-ca\") pod \"controller-manager-5d795f4d86-64xs7\" (UID: \"4c3477d6-9324-44a1-8207-b4587c46eb02\") " pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.746266 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4c3477d6-9324-44a1-8207-b4587c46eb02-proxy-ca-bundles\") pod \"controller-manager-5d795f4d86-64xs7\" (UID: \"4c3477d6-9324-44a1-8207-b4587c46eb02\") " pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.746297 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ddc1c0c-9146-444c-8597-7be6bb68a530-config\") pod \"route-controller-manager-745bcc88bb-ncrhx\" (UID: \"7ddc1c0c-9146-444c-8597-7be6bb68a530\") " pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.746587 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c3477d6-9324-44a1-8207-b4587c46eb02-config\") pod \"controller-manager-5d795f4d86-64xs7\" (UID: \"4c3477d6-9324-44a1-8207-b4587c46eb02\") " pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.746666 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7ddc1c0c-9146-444c-8597-7be6bb68a530-client-ca\") pod \"route-controller-manager-745bcc88bb-ncrhx\" (UID: \"7ddc1c0c-9146-444c-8597-7be6bb68a530\") " pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.746810 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vvf9\" (UniqueName: \"kubernetes.io/projected/4c3477d6-9324-44a1-8207-b4587c46eb02-kube-api-access-6vvf9\") pod \"controller-manager-5d795f4d86-64xs7\" (UID: \"4c3477d6-9324-44a1-8207-b4587c46eb02\") " pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.746892 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4c3477d6-9324-44a1-8207-b4587c46eb02-serving-cert\") pod \"controller-manager-5d795f4d86-64xs7\" (UID: \"4c3477d6-9324-44a1-8207-b4587c46eb02\") " pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.746921 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7ddc1c0c-9146-444c-8597-7be6bb68a530-serving-cert\") pod \"route-controller-manager-745bcc88bb-ncrhx\" (UID: \"7ddc1c0c-9146-444c-8597-7be6bb68a530\") " pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.746970 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7p69h\" (UniqueName: 
\"kubernetes.io/projected/7ddc1c0c-9146-444c-8597-7be6bb68a530-kube-api-access-7p69h\") pod \"route-controller-manager-745bcc88bb-ncrhx\" (UID: \"7ddc1c0c-9146-444c-8597-7be6bb68a530\") " pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.848529 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vvf9\" (UniqueName: \"kubernetes.io/projected/4c3477d6-9324-44a1-8207-b4587c46eb02-kube-api-access-6vvf9\") pod \"controller-manager-5d795f4d86-64xs7\" (UID: \"4c3477d6-9324-44a1-8207-b4587c46eb02\") " pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.848604 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4c3477d6-9324-44a1-8207-b4587c46eb02-serving-cert\") pod \"controller-manager-5d795f4d86-64xs7\" (UID: \"4c3477d6-9324-44a1-8207-b4587c46eb02\") " pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.848629 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7ddc1c0c-9146-444c-8597-7be6bb68a530-serving-cert\") pod \"route-controller-manager-745bcc88bb-ncrhx\" (UID: \"7ddc1c0c-9146-444c-8597-7be6bb68a530\") " pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.848667 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7p69h\" (UniqueName: \"kubernetes.io/projected/7ddc1c0c-9146-444c-8597-7be6bb68a530-kube-api-access-7p69h\") pod \"route-controller-manager-745bcc88bb-ncrhx\" (UID: \"7ddc1c0c-9146-444c-8597-7be6bb68a530\") " pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.848706 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4c3477d6-9324-44a1-8207-b4587c46eb02-client-ca\") pod \"controller-manager-5d795f4d86-64xs7\" (UID: \"4c3477d6-9324-44a1-8207-b4587c46eb02\") " pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.848728 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4c3477d6-9324-44a1-8207-b4587c46eb02-proxy-ca-bundles\") pod \"controller-manager-5d795f4d86-64xs7\" (UID: \"4c3477d6-9324-44a1-8207-b4587c46eb02\") " pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.848756 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ddc1c0c-9146-444c-8597-7be6bb68a530-config\") pod \"route-controller-manager-745bcc88bb-ncrhx\" (UID: \"7ddc1c0c-9146-444c-8597-7be6bb68a530\") " pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.848908 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c3477d6-9324-44a1-8207-b4587c46eb02-config\") pod 
\"controller-manager-5d795f4d86-64xs7\" (UID: \"4c3477d6-9324-44a1-8207-b4587c46eb02\") " pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.848947 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7ddc1c0c-9146-444c-8597-7be6bb68a530-client-ca\") pod \"route-controller-manager-745bcc88bb-ncrhx\" (UID: \"7ddc1c0c-9146-444c-8597-7be6bb68a530\") " pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.850146 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7ddc1c0c-9146-444c-8597-7be6bb68a530-client-ca\") pod \"route-controller-manager-745bcc88bb-ncrhx\" (UID: \"7ddc1c0c-9146-444c-8597-7be6bb68a530\") " pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.850202 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4c3477d6-9324-44a1-8207-b4587c46eb02-client-ca\") pod \"controller-manager-5d795f4d86-64xs7\" (UID: \"4c3477d6-9324-44a1-8207-b4587c46eb02\") " pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.850519 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ddc1c0c-9146-444c-8597-7be6bb68a530-config\") pod \"route-controller-manager-745bcc88bb-ncrhx\" (UID: \"7ddc1c0c-9146-444c-8597-7be6bb68a530\") " pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.850664 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4c3477d6-9324-44a1-8207-b4587c46eb02-proxy-ca-bundles\") pod \"controller-manager-5d795f4d86-64xs7\" (UID: \"4c3477d6-9324-44a1-8207-b4587c46eb02\") " pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.850719 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c3477d6-9324-44a1-8207-b4587c46eb02-config\") pod \"controller-manager-5d795f4d86-64xs7\" (UID: \"4c3477d6-9324-44a1-8207-b4587c46eb02\") " pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.852301 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7ddc1c0c-9146-444c-8597-7be6bb68a530-serving-cert\") pod \"route-controller-manager-745bcc88bb-ncrhx\" (UID: \"7ddc1c0c-9146-444c-8597-7be6bb68a530\") " pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.852394 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4c3477d6-9324-44a1-8207-b4587c46eb02-serving-cert\") pod \"controller-manager-5d795f4d86-64xs7\" (UID: \"4c3477d6-9324-44a1-8207-b4587c46eb02\") " pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.865757 4967 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vvf9\" (UniqueName: \"kubernetes.io/projected/4c3477d6-9324-44a1-8207-b4587c46eb02-kube-api-access-6vvf9\") pod \"controller-manager-5d795f4d86-64xs7\" (UID: \"4c3477d6-9324-44a1-8207-b4587c46eb02\") " pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.866107 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7p69h\" (UniqueName: \"kubernetes.io/projected/7ddc1c0c-9146-444c-8597-7be6bb68a530-kube-api-access-7p69h\") pod \"route-controller-manager-745bcc88bb-ncrhx\" (UID: \"7ddc1c0c-9146-444c-8597-7be6bb68a530\") " pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.939841 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" Nov 21 15:47:41 crc kubenswrapper[4967]: I1121 15:47:41.965452 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" Nov 21 15:47:42 crc kubenswrapper[4967]: I1121 15:47:42.412192 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx"] Nov 21 15:47:42 crc kubenswrapper[4967]: I1121 15:47:42.439453 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5d795f4d86-64xs7"] Nov 21 15:47:42 crc kubenswrapper[4967]: I1121 15:47:42.545867 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14b6d0fb-f8e7-4ed0-81e6-7aaacb172807" path="/var/lib/kubelet/pods/14b6d0fb-f8e7-4ed0-81e6-7aaacb172807/volumes" Nov 21 15:47:42 crc kubenswrapper[4967]: I1121 15:47:42.546681 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1def5571-fff5-47d8-b9bd-13ee21c73760" path="/var/lib/kubelet/pods/1def5571-fff5-47d8-b9bd-13ee21c73760/volumes" Nov 21 15:47:43 crc kubenswrapper[4967]: I1121 15:47:43.360502 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" event={"ID":"4c3477d6-9324-44a1-8207-b4587c46eb02","Type":"ContainerStarted","Data":"69fb34fa3feab969a58fe3932c5a3ee664bcfff74474d5e98dc303cbc3d62741"} Nov 21 15:47:43 crc kubenswrapper[4967]: I1121 15:47:43.361192 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" Nov 21 15:47:43 crc kubenswrapper[4967]: I1121 15:47:43.361755 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" event={"ID":"4c3477d6-9324-44a1-8207-b4587c46eb02","Type":"ContainerStarted","Data":"c49376b06beb20161f531a2be8dfa1db9679e85b346f2ef3e360be472faf350c"} Nov 21 15:47:43 crc kubenswrapper[4967]: I1121 15:47:43.362161 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" event={"ID":"7ddc1c0c-9146-444c-8597-7be6bb68a530","Type":"ContainerStarted","Data":"bf84a6131ef52a12d51d846231b63c918df7f050106d9c378b4a0cde3d8d3411"} Nov 21 15:47:43 crc kubenswrapper[4967]: I1121 15:47:43.362214 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" event={"ID":"7ddc1c0c-9146-444c-8597-7be6bb68a530","Type":"ContainerStarted","Data":"bcbf4165d00009124d20ec4b8d3b731d6a05655f148c1dd7681d0f274ac39534"} Nov 21 15:47:43 crc kubenswrapper[4967]: I1121 15:47:43.362672 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" Nov 21 15:47:43 crc kubenswrapper[4967]: I1121 15:47:43.366503 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" Nov 21 15:47:43 crc kubenswrapper[4967]: I1121 15:47:43.368485 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" Nov 21 15:47:43 crc kubenswrapper[4967]: I1121 15:47:43.385399 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5d795f4d86-64xs7" podStartSLOduration=3.385383489 podStartE2EDuration="3.385383489s" podCreationTimestamp="2025-11-21 15:47:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:47:43.380518779 +0000 UTC m=+751.639039787" watchObservedRunningTime="2025-11-21 15:47:43.385383489 +0000 UTC m=+751.643904497" Nov 21 15:47:43 crc kubenswrapper[4967]: I1121 15:47:43.401921 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" podStartSLOduration=3.401905383 podStartE2EDuration="3.401905383s" podCreationTimestamp="2025-11-21 15:47:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:47:43.396476857 +0000 UTC m=+751.654997895" watchObservedRunningTime="2025-11-21 15:47:43.401905383 +0000 UTC m=+751.660426401" Nov 21 15:47:46 crc kubenswrapper[4967]: I1121 15:47:46.522432 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:47:46 crc kubenswrapper[4967]: I1121 15:47:46.522782 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:47:47 crc kubenswrapper[4967]: I1121 15:47:47.962678 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-querier-5895d59bb8-56lsk" Nov 21 15:47:48 crc kubenswrapper[4967]: I1121 15:47:48.574137 4967 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 21 15:47:48 crc kubenswrapper[4967]: I1121 15:47:48.668089 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-x2lz9"] Nov 21 15:47:48 crc kubenswrapper[4967]: I1121 15:47:48.669794 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x2lz9" Nov 21 15:47:48 crc kubenswrapper[4967]: I1121 15:47:48.684099 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-x2lz9"] Nov 21 15:47:48 crc kubenswrapper[4967]: I1121 15:47:48.757338 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c4d817a-f606-4d9b-9eaa-ce3b93c100b9-utilities\") pod \"redhat-marketplace-x2lz9\" (UID: \"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9\") " pod="openshift-marketplace/redhat-marketplace-x2lz9" Nov 21 15:47:48 crc kubenswrapper[4967]: I1121 15:47:48.757496 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c4d817a-f606-4d9b-9eaa-ce3b93c100b9-catalog-content\") pod \"redhat-marketplace-x2lz9\" (UID: \"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9\") " pod="openshift-marketplace/redhat-marketplace-x2lz9" Nov 21 15:47:48 crc kubenswrapper[4967]: I1121 15:47:48.757590 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j64q7\" (UniqueName: \"kubernetes.io/projected/6c4d817a-f606-4d9b-9eaa-ce3b93c100b9-kube-api-access-j64q7\") pod \"redhat-marketplace-x2lz9\" (UID: \"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9\") " pod="openshift-marketplace/redhat-marketplace-x2lz9" Nov 21 15:47:48 crc kubenswrapper[4967]: I1121 15:47:48.859391 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c4d817a-f606-4d9b-9eaa-ce3b93c100b9-catalog-content\") pod \"redhat-marketplace-x2lz9\" (UID: \"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9\") " pod="openshift-marketplace/redhat-marketplace-x2lz9" Nov 21 15:47:48 crc kubenswrapper[4967]: I1121 15:47:48.859799 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j64q7\" (UniqueName: \"kubernetes.io/projected/6c4d817a-f606-4d9b-9eaa-ce3b93c100b9-kube-api-access-j64q7\") pod \"redhat-marketplace-x2lz9\" (UID: \"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9\") " pod="openshift-marketplace/redhat-marketplace-x2lz9" Nov 21 15:47:48 crc kubenswrapper[4967]: I1121 15:47:48.859902 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c4d817a-f606-4d9b-9eaa-ce3b93c100b9-utilities\") pod \"redhat-marketplace-x2lz9\" (UID: \"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9\") " pod="openshift-marketplace/redhat-marketplace-x2lz9" Nov 21 15:47:48 crc kubenswrapper[4967]: I1121 15:47:48.859938 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c4d817a-f606-4d9b-9eaa-ce3b93c100b9-catalog-content\") pod \"redhat-marketplace-x2lz9\" (UID: \"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9\") " pod="openshift-marketplace/redhat-marketplace-x2lz9" Nov 21 15:47:48 crc kubenswrapper[4967]: I1121 15:47:48.860173 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c4d817a-f606-4d9b-9eaa-ce3b93c100b9-utilities\") pod \"redhat-marketplace-x2lz9\" (UID: \"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9\") " pod="openshift-marketplace/redhat-marketplace-x2lz9" Nov 21 15:47:48 crc kubenswrapper[4967]: I1121 15:47:48.881643 4967 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-j64q7\" (UniqueName: \"kubernetes.io/projected/6c4d817a-f606-4d9b-9eaa-ce3b93c100b9-kube-api-access-j64q7\") pod \"redhat-marketplace-x2lz9\" (UID: \"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9\") " pod="openshift-marketplace/redhat-marketplace-x2lz9" Nov 21 15:47:48 crc kubenswrapper[4967]: I1121 15:47:48.944935 4967 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens Nov 21 15:47:48 crc kubenswrapper[4967]: I1121 15:47:48.945200 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 21 15:47:49 crc kubenswrapper[4967]: I1121 15:47:49.031807 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x2lz9" Nov 21 15:47:49 crc kubenswrapper[4967]: I1121 15:47:49.054299 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-compactor-0" Nov 21 15:47:49 crc kubenswrapper[4967]: I1121 15:47:49.231949 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-index-gateway-0" Nov 21 15:47:49 crc kubenswrapper[4967]: I1121 15:47:49.541871 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-x2lz9"] Nov 21 15:47:50 crc kubenswrapper[4967]: I1121 15:47:50.424261 4967 generic.go:334] "Generic (PLEG): container finished" podID="6c4d817a-f606-4d9b-9eaa-ce3b93c100b9" containerID="eb5b12258fc8a8afb599263c498652a204e1c80dcf8543129e47f75db0c3dd40" exitCode=0 Nov 21 15:47:50 crc kubenswrapper[4967]: I1121 15:47:50.424809 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x2lz9" event={"ID":"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9","Type":"ContainerDied","Data":"eb5b12258fc8a8afb599263c498652a204e1c80dcf8543129e47f75db0c3dd40"} Nov 21 15:47:50 crc kubenswrapper[4967]: I1121 15:47:50.424839 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x2lz9" event={"ID":"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9","Type":"ContainerStarted","Data":"07ab341dd1c57e2a30e3670d496ad8181beb5974481c14bc542a86954457a494"} Nov 21 15:47:51 crc kubenswrapper[4967]: I1121 15:47:51.432902 4967 generic.go:334] "Generic (PLEG): container finished" podID="6c4d817a-f606-4d9b-9eaa-ce3b93c100b9" containerID="18da309fde5f9c0be0ad5decd898f5e571a7edd522f59f5300a3181a576a1d4e" exitCode=0 Nov 21 15:47:51 crc kubenswrapper[4967]: I1121 15:47:51.432963 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x2lz9" event={"ID":"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9","Type":"ContainerDied","Data":"18da309fde5f9c0be0ad5decd898f5e571a7edd522f59f5300a3181a576a1d4e"} Nov 21 15:47:52 crc kubenswrapper[4967]: I1121 15:47:52.440743 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x2lz9" event={"ID":"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9","Type":"ContainerStarted","Data":"3378addd2e98ab3aec418bc1e01448ae3d50be504cd3baef63cdc8159a963b6b"} Nov 21 15:47:52 crc kubenswrapper[4967]: I1121 15:47:52.459896 4967 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-x2lz9" podStartSLOduration=3.083389509 podStartE2EDuration="4.459879517s" podCreationTimestamp="2025-11-21 15:47:48 +0000 UTC" firstStartedPulling="2025-11-21 15:47:50.429054219 +0000 UTC m=+758.687575227" lastFinishedPulling="2025-11-21 15:47:51.805544237 +0000 UTC m=+760.064065235" observedRunningTime="2025-11-21 15:47:52.457493388 +0000 UTC m=+760.716014406" watchObservedRunningTime="2025-11-21 15:47:52.459879517 +0000 UTC m=+760.718400525" Nov 21 15:47:57 crc kubenswrapper[4967]: I1121 15:47:57.733523 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-distributor-76cc67bf56-w6drw" Nov 21 15:47:58 crc kubenswrapper[4967]: I1121 15:47:58.208087 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-dsn5g" Nov 21 15:47:58 crc kubenswrapper[4967]: I1121 15:47:58.984476 4967 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens Nov 21 15:47:58 crc kubenswrapper[4967]: I1121 15:47:58.984540 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 21 15:47:59 crc kubenswrapper[4967]: I1121 15:47:59.032501 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-x2lz9" Nov 21 15:47:59 crc kubenswrapper[4967]: I1121 15:47:59.032560 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-x2lz9" Nov 21 15:47:59 crc kubenswrapper[4967]: I1121 15:47:59.072701 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-x2lz9" Nov 21 15:47:59 crc kubenswrapper[4967]: I1121 15:47:59.693503 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-x2lz9" Nov 21 15:47:59 crc kubenswrapper[4967]: I1121 15:47:59.735705 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-x2lz9"] Nov 21 15:48:01 crc kubenswrapper[4967]: I1121 15:48:01.668186 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-x2lz9" podUID="6c4d817a-f606-4d9b-9eaa-ce3b93c100b9" containerName="registry-server" containerID="cri-o://3378addd2e98ab3aec418bc1e01448ae3d50be504cd3baef63cdc8159a963b6b" gracePeriod=2 Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.138580 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x2lz9" Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.264931 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j64q7\" (UniqueName: \"kubernetes.io/projected/6c4d817a-f606-4d9b-9eaa-ce3b93c100b9-kube-api-access-j64q7\") pod \"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9\" (UID: \"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9\") " Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.265014 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c4d817a-f606-4d9b-9eaa-ce3b93c100b9-catalog-content\") pod \"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9\" (UID: \"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9\") " Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.265142 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c4d817a-f606-4d9b-9eaa-ce3b93c100b9-utilities\") pod \"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9\" (UID: \"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9\") " Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.266401 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c4d817a-f606-4d9b-9eaa-ce3b93c100b9-utilities" (OuterVolumeSpecName: "utilities") pod "6c4d817a-f606-4d9b-9eaa-ce3b93c100b9" (UID: "6c4d817a-f606-4d9b-9eaa-ce3b93c100b9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.270476 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c4d817a-f606-4d9b-9eaa-ce3b93c100b9-kube-api-access-j64q7" (OuterVolumeSpecName: "kube-api-access-j64q7") pod "6c4d817a-f606-4d9b-9eaa-ce3b93c100b9" (UID: "6c4d817a-f606-4d9b-9eaa-ce3b93c100b9"). InnerVolumeSpecName "kube-api-access-j64q7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.367051 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j64q7\" (UniqueName: \"kubernetes.io/projected/6c4d817a-f606-4d9b-9eaa-ce3b93c100b9-kube-api-access-j64q7\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.367094 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6c4d817a-f606-4d9b-9eaa-ce3b93c100b9-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.678088 4967 generic.go:334] "Generic (PLEG): container finished" podID="6c4d817a-f606-4d9b-9eaa-ce3b93c100b9" containerID="3378addd2e98ab3aec418bc1e01448ae3d50be504cd3baef63cdc8159a963b6b" exitCode=0 Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.678438 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x2lz9" event={"ID":"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9","Type":"ContainerDied","Data":"3378addd2e98ab3aec418bc1e01448ae3d50be504cd3baef63cdc8159a963b6b"} Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.678471 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x2lz9" event={"ID":"6c4d817a-f606-4d9b-9eaa-ce3b93c100b9","Type":"ContainerDied","Data":"07ab341dd1c57e2a30e3670d496ad8181beb5974481c14bc542a86954457a494"} Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.678498 4967 scope.go:117] "RemoveContainer" containerID="3378addd2e98ab3aec418bc1e01448ae3d50be504cd3baef63cdc8159a963b6b" Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.678638 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x2lz9" Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.703038 4967 scope.go:117] "RemoveContainer" containerID="18da309fde5f9c0be0ad5decd898f5e571a7edd522f59f5300a3181a576a1d4e" Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.718342 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c4d817a-f606-4d9b-9eaa-ce3b93c100b9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6c4d817a-f606-4d9b-9eaa-ce3b93c100b9" (UID: "6c4d817a-f606-4d9b-9eaa-ce3b93c100b9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.725453 4967 scope.go:117] "RemoveContainer" containerID="eb5b12258fc8a8afb599263c498652a204e1c80dcf8543129e47f75db0c3dd40" Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.758232 4967 scope.go:117] "RemoveContainer" containerID="3378addd2e98ab3aec418bc1e01448ae3d50be504cd3baef63cdc8159a963b6b" Nov 21 15:48:02 crc kubenswrapper[4967]: E1121 15:48:02.758636 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3378addd2e98ab3aec418bc1e01448ae3d50be504cd3baef63cdc8159a963b6b\": container with ID starting with 3378addd2e98ab3aec418bc1e01448ae3d50be504cd3baef63cdc8159a963b6b not found: ID does not exist" containerID="3378addd2e98ab3aec418bc1e01448ae3d50be504cd3baef63cdc8159a963b6b" Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.758676 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3378addd2e98ab3aec418bc1e01448ae3d50be504cd3baef63cdc8159a963b6b"} err="failed to get container status \"3378addd2e98ab3aec418bc1e01448ae3d50be504cd3baef63cdc8159a963b6b\": rpc error: code = NotFound desc = could not find container \"3378addd2e98ab3aec418bc1e01448ae3d50be504cd3baef63cdc8159a963b6b\": container with ID starting with 3378addd2e98ab3aec418bc1e01448ae3d50be504cd3baef63cdc8159a963b6b not found: ID does not exist" Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.758701 4967 scope.go:117] "RemoveContainer" containerID="18da309fde5f9c0be0ad5decd898f5e571a7edd522f59f5300a3181a576a1d4e" Nov 21 15:48:02 crc kubenswrapper[4967]: E1121 15:48:02.758960 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18da309fde5f9c0be0ad5decd898f5e571a7edd522f59f5300a3181a576a1d4e\": container with ID starting with 18da309fde5f9c0be0ad5decd898f5e571a7edd522f59f5300a3181a576a1d4e not found: ID does not exist" containerID="18da309fde5f9c0be0ad5decd898f5e571a7edd522f59f5300a3181a576a1d4e" Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.758982 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18da309fde5f9c0be0ad5decd898f5e571a7edd522f59f5300a3181a576a1d4e"} err="failed to get container status \"18da309fde5f9c0be0ad5decd898f5e571a7edd522f59f5300a3181a576a1d4e\": rpc error: code = NotFound desc = could not find container \"18da309fde5f9c0be0ad5decd898f5e571a7edd522f59f5300a3181a576a1d4e\": container with ID starting with 18da309fde5f9c0be0ad5decd898f5e571a7edd522f59f5300a3181a576a1d4e not found: ID does not exist" Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.758998 4967 scope.go:117] "RemoveContainer" containerID="eb5b12258fc8a8afb599263c498652a204e1c80dcf8543129e47f75db0c3dd40" Nov 21 15:48:02 crc kubenswrapper[4967]: E1121 15:48:02.759408 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb5b12258fc8a8afb599263c498652a204e1c80dcf8543129e47f75db0c3dd40\": container with ID starting with eb5b12258fc8a8afb599263c498652a204e1c80dcf8543129e47f75db0c3dd40 not found: ID does not exist" containerID="eb5b12258fc8a8afb599263c498652a204e1c80dcf8543129e47f75db0c3dd40" Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.759426 4967 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"eb5b12258fc8a8afb599263c498652a204e1c80dcf8543129e47f75db0c3dd40"} err="failed to get container status \"eb5b12258fc8a8afb599263c498652a204e1c80dcf8543129e47f75db0c3dd40\": rpc error: code = NotFound desc = could not find container \"eb5b12258fc8a8afb599263c498652a204e1c80dcf8543129e47f75db0c3dd40\": container with ID starting with eb5b12258fc8a8afb599263c498652a204e1c80dcf8543129e47f75db0c3dd40 not found: ID does not exist" Nov 21 15:48:02 crc kubenswrapper[4967]: I1121 15:48:02.772119 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6c4d817a-f606-4d9b-9eaa-ce3b93c100b9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:03 crc kubenswrapper[4967]: I1121 15:48:03.013167 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-x2lz9"] Nov 21 15:48:03 crc kubenswrapper[4967]: I1121 15:48:03.019961 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-x2lz9"] Nov 21 15:48:04 crc kubenswrapper[4967]: I1121 15:48:04.545622 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c4d817a-f606-4d9b-9eaa-ce3b93c100b9" path="/var/lib/kubelet/pods/6c4d817a-f606-4d9b-9eaa-ce3b93c100b9/volumes" Nov 21 15:48:08 crc kubenswrapper[4967]: I1121 15:48:08.941347 4967 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Nov 21 15:48:08 crc kubenswrapper[4967]: I1121 15:48:08.941694 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 21 15:48:14 crc kubenswrapper[4967]: I1121 15:48:14.873460 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tqcxf"] Nov 21 15:48:14 crc kubenswrapper[4967]: E1121 15:48:14.874249 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c4d817a-f606-4d9b-9eaa-ce3b93c100b9" containerName="registry-server" Nov 21 15:48:14 crc kubenswrapper[4967]: I1121 15:48:14.874263 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c4d817a-f606-4d9b-9eaa-ce3b93c100b9" containerName="registry-server" Nov 21 15:48:14 crc kubenswrapper[4967]: E1121 15:48:14.874290 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c4d817a-f606-4d9b-9eaa-ce3b93c100b9" containerName="extract-content" Nov 21 15:48:14 crc kubenswrapper[4967]: I1121 15:48:14.874298 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c4d817a-f606-4d9b-9eaa-ce3b93c100b9" containerName="extract-content" Nov 21 15:48:14 crc kubenswrapper[4967]: E1121 15:48:14.874323 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c4d817a-f606-4d9b-9eaa-ce3b93c100b9" containerName="extract-utilities" Nov 21 15:48:14 crc kubenswrapper[4967]: I1121 15:48:14.874334 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c4d817a-f606-4d9b-9eaa-ce3b93c100b9" containerName="extract-utilities" Nov 21 15:48:14 crc kubenswrapper[4967]: I1121 15:48:14.874476 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c4d817a-f606-4d9b-9eaa-ce3b93c100b9" containerName="registry-server" Nov 21 15:48:14 
Nov 21 15:48:14 crc kubenswrapper[4967]: I1121 15:48:14.882640 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tqcxf"]
Nov 21 15:48:14 crc kubenswrapper[4967]: I1121 15:48:14.965342 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45b01b9-ff37-44d9-acf1-4edbd9878c0b-catalog-content\") pod \"certified-operators-tqcxf\" (UID: \"a45b01b9-ff37-44d9-acf1-4edbd9878c0b\") " pod="openshift-marketplace/certified-operators-tqcxf"
Nov 21 15:48:14 crc kubenswrapper[4967]: I1121 15:48:14.965450 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgzng\" (UniqueName: \"kubernetes.io/projected/a45b01b9-ff37-44d9-acf1-4edbd9878c0b-kube-api-access-mgzng\") pod \"certified-operators-tqcxf\" (UID: \"a45b01b9-ff37-44d9-acf1-4edbd9878c0b\") " pod="openshift-marketplace/certified-operators-tqcxf"
Nov 21 15:48:14 crc kubenswrapper[4967]: I1121 15:48:14.965497 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45b01b9-ff37-44d9-acf1-4edbd9878c0b-utilities\") pod \"certified-operators-tqcxf\" (UID: \"a45b01b9-ff37-44d9-acf1-4edbd9878c0b\") " pod="openshift-marketplace/certified-operators-tqcxf"
Nov 21 15:48:15 crc kubenswrapper[4967]: I1121 15:48:15.067210 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgzng\" (UniqueName: \"kubernetes.io/projected/a45b01b9-ff37-44d9-acf1-4edbd9878c0b-kube-api-access-mgzng\") pod \"certified-operators-tqcxf\" (UID: \"a45b01b9-ff37-44d9-acf1-4edbd9878c0b\") " pod="openshift-marketplace/certified-operators-tqcxf"
Nov 21 15:48:15 crc kubenswrapper[4967]: I1121 15:48:15.067302 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45b01b9-ff37-44d9-acf1-4edbd9878c0b-utilities\") pod \"certified-operators-tqcxf\" (UID: \"a45b01b9-ff37-44d9-acf1-4edbd9878c0b\") " pod="openshift-marketplace/certified-operators-tqcxf"
Nov 21 15:48:15 crc kubenswrapper[4967]: I1121 15:48:15.067395 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45b01b9-ff37-44d9-acf1-4edbd9878c0b-catalog-content\") pod \"certified-operators-tqcxf\" (UID: \"a45b01b9-ff37-44d9-acf1-4edbd9878c0b\") " pod="openshift-marketplace/certified-operators-tqcxf"
Nov 21 15:48:15 crc kubenswrapper[4967]: I1121 15:48:15.067913 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45b01b9-ff37-44d9-acf1-4edbd9878c0b-utilities\") pod \"certified-operators-tqcxf\" (UID: \"a45b01b9-ff37-44d9-acf1-4edbd9878c0b\") " pod="openshift-marketplace/certified-operators-tqcxf"
Nov 21 15:48:15 crc kubenswrapper[4967]: I1121 15:48:15.067967 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45b01b9-ff37-44d9-acf1-4edbd9878c0b-catalog-content\") pod \"certified-operators-tqcxf\" (UID: \"a45b01b9-ff37-44d9-acf1-4edbd9878c0b\") " pod="openshift-marketplace/certified-operators-tqcxf"
Nov 21 15:48:15 crc kubenswrapper[4967]: I1121 15:48:15.093216 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgzng\" (UniqueName: \"kubernetes.io/projected/a45b01b9-ff37-44d9-acf1-4edbd9878c0b-kube-api-access-mgzng\") pod \"certified-operators-tqcxf\" (UID: \"a45b01b9-ff37-44d9-acf1-4edbd9878c0b\") " pod="openshift-marketplace/certified-operators-tqcxf"
Nov 21 15:48:15 crc kubenswrapper[4967]: I1121 15:48:15.192429 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tqcxf"
Nov 21 15:48:15 crc kubenswrapper[4967]: I1121 15:48:15.708815 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tqcxf"]
Nov 21 15:48:15 crc kubenswrapper[4967]: W1121 15:48:15.716124 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda45b01b9_ff37_44d9_acf1_4edbd9878c0b.slice/crio-15344d57ba2e653b569f38c9a7a7627212f5d680117584bd461bbcbb45831acd WatchSource:0}: Error finding container 15344d57ba2e653b569f38c9a7a7627212f5d680117584bd461bbcbb45831acd: Status 404 returned error can't find the container with id 15344d57ba2e653b569f38c9a7a7627212f5d680117584bd461bbcbb45831acd
Nov 21 15:48:15 crc kubenswrapper[4967]: I1121 15:48:15.763184 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tqcxf" event={"ID":"a45b01b9-ff37-44d9-acf1-4edbd9878c0b","Type":"ContainerStarted","Data":"15344d57ba2e653b569f38c9a7a7627212f5d680117584bd461bbcbb45831acd"}
Nov 21 15:48:16 crc kubenswrapper[4967]: I1121 15:48:16.522127 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 21 15:48:16 crc kubenswrapper[4967]: I1121 15:48:16.522185 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 21 15:48:16 crc kubenswrapper[4967]: I1121 15:48:16.522231 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2"
Nov 21 15:48:16 crc kubenswrapper[4967]: I1121 15:48:16.522898 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1d2361c88740d2f7915ba1040ce6a3af440c8b0cf78d9c3df36a544e55b9adc8"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 21 15:48:16 crc kubenswrapper[4967]: I1121 15:48:16.522948 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://1d2361c88740d2f7915ba1040ce6a3af440c8b0cf78d9c3df36a544e55b9adc8" gracePeriod=600
Nov 21 15:48:16 crc kubenswrapper[4967]: I1121 15:48:16.772817 4967 generic.go:334] "Generic (PLEG): container finished" podID="a45b01b9-ff37-44d9-acf1-4edbd9878c0b" containerID="4b28ad9b2b7b62f574c2588f6905ac6b569e8c17894881fd831a4c33d10a4338" exitCode=0
Nov 21 15:48:16 crc kubenswrapper[4967]: I1121 15:48:16.772872 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tqcxf" event={"ID":"a45b01b9-ff37-44d9-acf1-4edbd9878c0b","Type":"ContainerDied","Data":"4b28ad9b2b7b62f574c2588f6905ac6b569e8c17894881fd831a4c33d10a4338"}
Nov 21 15:48:16 crc kubenswrapper[4967]: I1121 15:48:16.776869 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="1d2361c88740d2f7915ba1040ce6a3af440c8b0cf78d9c3df36a544e55b9adc8" exitCode=0
Nov 21 15:48:16 crc kubenswrapper[4967]: I1121 15:48:16.776911 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"1d2361c88740d2f7915ba1040ce6a3af440c8b0cf78d9c3df36a544e55b9adc8"}
Nov 21 15:48:16 crc kubenswrapper[4967]: I1121 15:48:16.776948 4967 scope.go:117] "RemoveContainer" containerID="02dde30aac8f6d27fad1d64a7cac03cf4e5c604ca91f456020aea82d85f77a2a"
Nov 21 15:48:17 crc kubenswrapper[4967]: I1121 15:48:17.784173 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"1d65d5683a0677cbfe6b7ae1700b31a63febdef0c20d9c6546a7663875980c24"}
Nov 21 15:48:18 crc kubenswrapper[4967]: I1121 15:48:18.792164 4967 generic.go:334] "Generic (PLEG): container finished" podID="a45b01b9-ff37-44d9-acf1-4edbd9878c0b" containerID="d60228cb6b65f0d64a83f7dad209e6d8505da30c25df326b70ed3c4aae8836f9" exitCode=0
Nov 21 15:48:18 crc kubenswrapper[4967]: I1121 15:48:18.792402 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tqcxf" event={"ID":"a45b01b9-ff37-44d9-acf1-4edbd9878c0b","Type":"ContainerDied","Data":"d60228cb6b65f0d64a83f7dad209e6d8505da30c25df326b70ed3c4aae8836f9"}
Nov 21 15:48:18 crc kubenswrapper[4967]: I1121 15:48:18.946682 4967 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready
Nov 21 15:48:18 crc kubenswrapper[4967]: I1121 15:48:18.946775 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Nov 21 15:48:19 crc kubenswrapper[4967]: I1121 15:48:19.801576 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tqcxf" event={"ID":"a45b01b9-ff37-44d9-acf1-4edbd9878c0b","Type":"ContainerStarted","Data":"d2799cad3991fdc4ade4492d502f4b50c14b5fedd82c3b41ba04f2e4f9a719a5"}
Nov 21 15:48:19 crc kubenswrapper[4967]: I1121 15:48:19.822592 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tqcxf" podStartSLOduration=3.33777177 podStartE2EDuration="5.82257224s" podCreationTimestamp="2025-11-21 15:48:14 +0000 UTC" firstStartedPulling="2025-11-21 15:48:16.774780839 +0000 UTC m=+785.033301847" lastFinishedPulling="2025-11-21 15:48:19.259581309 +0000 UTC m=+787.518102317" observedRunningTime="2025-11-21 15:48:19.82085325 +0000 UTC m=+788.079374258" watchObservedRunningTime="2025-11-21 15:48:19.82257224 +0000 UTC m=+788.081093238"
m=+787.518102317" observedRunningTime="2025-11-21 15:48:19.82085325 +0000 UTC m=+788.079374258" watchObservedRunningTime="2025-11-21 15:48:19.82257224 +0000 UTC m=+788.081093238" Nov 21 15:48:25 crc kubenswrapper[4967]: I1121 15:48:25.192913 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tqcxf" Nov 21 15:48:25 crc kubenswrapper[4967]: I1121 15:48:25.193548 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tqcxf" Nov 21 15:48:25 crc kubenswrapper[4967]: I1121 15:48:25.238670 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tqcxf" Nov 21 15:48:25 crc kubenswrapper[4967]: I1121 15:48:25.878495 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tqcxf" Nov 21 15:48:25 crc kubenswrapper[4967]: I1121 15:48:25.915959 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tqcxf"] Nov 21 15:48:27 crc kubenswrapper[4967]: I1121 15:48:27.852112 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tqcxf" podUID="a45b01b9-ff37-44d9-acf1-4edbd9878c0b" containerName="registry-server" containerID="cri-o://d2799cad3991fdc4ade4492d502f4b50c14b5fedd82c3b41ba04f2e4f9a719a5" gracePeriod=2 Nov 21 15:48:28 crc kubenswrapper[4967]: I1121 15:48:28.863141 4967 generic.go:334] "Generic (PLEG): container finished" podID="a45b01b9-ff37-44d9-acf1-4edbd9878c0b" containerID="d2799cad3991fdc4ade4492d502f4b50c14b5fedd82c3b41ba04f2e4f9a719a5" exitCode=0 Nov 21 15:48:28 crc kubenswrapper[4967]: I1121 15:48:28.863205 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tqcxf" event={"ID":"a45b01b9-ff37-44d9-acf1-4edbd9878c0b","Type":"ContainerDied","Data":"d2799cad3991fdc4ade4492d502f4b50c14b5fedd82c3b41ba04f2e4f9a719a5"} Nov 21 15:48:28 crc kubenswrapper[4967]: I1121 15:48:28.918193 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tqcxf" Nov 21 15:48:28 crc kubenswrapper[4967]: I1121 15:48:28.944637 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-ingester-0" Nov 21 15:48:28 crc kubenswrapper[4967]: I1121 15:48:28.995132 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45b01b9-ff37-44d9-acf1-4edbd9878c0b-catalog-content\") pod \"a45b01b9-ff37-44d9-acf1-4edbd9878c0b\" (UID: \"a45b01b9-ff37-44d9-acf1-4edbd9878c0b\") " Nov 21 15:48:28 crc kubenswrapper[4967]: I1121 15:48:28.995289 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mgzng\" (UniqueName: \"kubernetes.io/projected/a45b01b9-ff37-44d9-acf1-4edbd9878c0b-kube-api-access-mgzng\") pod \"a45b01b9-ff37-44d9-acf1-4edbd9878c0b\" (UID: \"a45b01b9-ff37-44d9-acf1-4edbd9878c0b\") " Nov 21 15:48:28 crc kubenswrapper[4967]: I1121 15:48:28.995364 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45b01b9-ff37-44d9-acf1-4edbd9878c0b-utilities\") pod \"a45b01b9-ff37-44d9-acf1-4edbd9878c0b\" (UID: \"a45b01b9-ff37-44d9-acf1-4edbd9878c0b\") " Nov 21 15:48:28 crc kubenswrapper[4967]: I1121 15:48:28.996406 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a45b01b9-ff37-44d9-acf1-4edbd9878c0b-utilities" (OuterVolumeSpecName: "utilities") pod "a45b01b9-ff37-44d9-acf1-4edbd9878c0b" (UID: "a45b01b9-ff37-44d9-acf1-4edbd9878c0b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:48:29 crc kubenswrapper[4967]: I1121 15:48:29.004579 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a45b01b9-ff37-44d9-acf1-4edbd9878c0b-kube-api-access-mgzng" (OuterVolumeSpecName: "kube-api-access-mgzng") pod "a45b01b9-ff37-44d9-acf1-4edbd9878c0b" (UID: "a45b01b9-ff37-44d9-acf1-4edbd9878c0b"). InnerVolumeSpecName "kube-api-access-mgzng". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:48:29 crc kubenswrapper[4967]: I1121 15:48:29.097131 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mgzng\" (UniqueName: \"kubernetes.io/projected/a45b01b9-ff37-44d9-acf1-4edbd9878c0b-kube-api-access-mgzng\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:29 crc kubenswrapper[4967]: I1121 15:48:29.097441 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45b01b9-ff37-44d9-acf1-4edbd9878c0b-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:29 crc kubenswrapper[4967]: I1121 15:48:29.865071 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-b5w9n"] Nov 21 15:48:29 crc kubenswrapper[4967]: E1121 15:48:29.867126 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a45b01b9-ff37-44d9-acf1-4edbd9878c0b" containerName="extract-content" Nov 21 15:48:29 crc kubenswrapper[4967]: I1121 15:48:29.867236 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a45b01b9-ff37-44d9-acf1-4edbd9878c0b" containerName="extract-content" Nov 21 15:48:29 crc kubenswrapper[4967]: E1121 15:48:29.867263 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a45b01b9-ff37-44d9-acf1-4edbd9878c0b" containerName="extract-utilities" Nov 21 15:48:29 crc kubenswrapper[4967]: I1121 15:48:29.867271 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a45b01b9-ff37-44d9-acf1-4edbd9878c0b" containerName="extract-utilities" Nov 21 15:48:29 crc kubenswrapper[4967]: E1121 15:48:29.867279 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a45b01b9-ff37-44d9-acf1-4edbd9878c0b" containerName="registry-server" Nov 21 15:48:29 crc kubenswrapper[4967]: I1121 15:48:29.867285 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a45b01b9-ff37-44d9-acf1-4edbd9878c0b" containerName="registry-server" Nov 21 15:48:29 crc kubenswrapper[4967]: I1121 15:48:29.869150 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="a45b01b9-ff37-44d9-acf1-4edbd9878c0b" containerName="registry-server" Nov 21 15:48:29 crc kubenswrapper[4967]: I1121 15:48:29.876529 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b5w9n" Nov 21 15:48:29 crc kubenswrapper[4967]: I1121 15:48:29.891899 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b5w9n"] Nov 21 15:48:29 crc kubenswrapper[4967]: I1121 15:48:29.897081 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tqcxf" event={"ID":"a45b01b9-ff37-44d9-acf1-4edbd9878c0b","Type":"ContainerDied","Data":"15344d57ba2e653b569f38c9a7a7627212f5d680117584bd461bbcbb45831acd"} Nov 21 15:48:29 crc kubenswrapper[4967]: I1121 15:48:29.897161 4967 scope.go:117] "RemoveContainer" containerID="d2799cad3991fdc4ade4492d502f4b50c14b5fedd82c3b41ba04f2e4f9a719a5" Nov 21 15:48:29 crc kubenswrapper[4967]: I1121 15:48:29.897182 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tqcxf" Nov 21 15:48:29 crc kubenswrapper[4967]: I1121 15:48:29.911889 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e1a7716-4267-41d5-8cfb-7e3f9522ef70-utilities\") pod \"redhat-operators-b5w9n\" (UID: \"9e1a7716-4267-41d5-8cfb-7e3f9522ef70\") " pod="openshift-marketplace/redhat-operators-b5w9n" Nov 21 15:48:29 crc kubenswrapper[4967]: I1121 15:48:29.911970 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e1a7716-4267-41d5-8cfb-7e3f9522ef70-catalog-content\") pod \"redhat-operators-b5w9n\" (UID: \"9e1a7716-4267-41d5-8cfb-7e3f9522ef70\") " pod="openshift-marketplace/redhat-operators-b5w9n" Nov 21 15:48:29 crc kubenswrapper[4967]: I1121 15:48:29.912010 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9sg2l\" (UniqueName: \"kubernetes.io/projected/9e1a7716-4267-41d5-8cfb-7e3f9522ef70-kube-api-access-9sg2l\") pod \"redhat-operators-b5w9n\" (UID: \"9e1a7716-4267-41d5-8cfb-7e3f9522ef70\") " pod="openshift-marketplace/redhat-operators-b5w9n" Nov 21 15:48:29 crc kubenswrapper[4967]: I1121 15:48:29.924523 4967 scope.go:117] "RemoveContainer" containerID="d60228cb6b65f0d64a83f7dad209e6d8505da30c25df326b70ed3c4aae8836f9" Nov 21 15:48:29 crc kubenswrapper[4967]: I1121 15:48:29.948360 4967 scope.go:117] "RemoveContainer" containerID="4b28ad9b2b7b62f574c2588f6905ac6b569e8c17894881fd831a4c33d10a4338" Nov 21 15:48:30 crc kubenswrapper[4967]: I1121 15:48:30.013786 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e1a7716-4267-41d5-8cfb-7e3f9522ef70-utilities\") pod \"redhat-operators-b5w9n\" (UID: \"9e1a7716-4267-41d5-8cfb-7e3f9522ef70\") " pod="openshift-marketplace/redhat-operators-b5w9n" Nov 21 15:48:30 crc kubenswrapper[4967]: I1121 15:48:30.013872 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e1a7716-4267-41d5-8cfb-7e3f9522ef70-catalog-content\") pod \"redhat-operators-b5w9n\" (UID: \"9e1a7716-4267-41d5-8cfb-7e3f9522ef70\") " pod="openshift-marketplace/redhat-operators-b5w9n" Nov 21 15:48:30 crc kubenswrapper[4967]: I1121 15:48:30.013921 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9sg2l\" (UniqueName: \"kubernetes.io/projected/9e1a7716-4267-41d5-8cfb-7e3f9522ef70-kube-api-access-9sg2l\") pod \"redhat-operators-b5w9n\" (UID: \"9e1a7716-4267-41d5-8cfb-7e3f9522ef70\") " pod="openshift-marketplace/redhat-operators-b5w9n" Nov 21 15:48:30 crc kubenswrapper[4967]: I1121 15:48:30.014246 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e1a7716-4267-41d5-8cfb-7e3f9522ef70-utilities\") pod \"redhat-operators-b5w9n\" (UID: \"9e1a7716-4267-41d5-8cfb-7e3f9522ef70\") " pod="openshift-marketplace/redhat-operators-b5w9n" Nov 21 15:48:30 crc kubenswrapper[4967]: I1121 15:48:30.014682 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e1a7716-4267-41d5-8cfb-7e3f9522ef70-catalog-content\") pod \"redhat-operators-b5w9n\" (UID: \"9e1a7716-4267-41d5-8cfb-7e3f9522ef70\") " 
pod="openshift-marketplace/redhat-operators-b5w9n" Nov 21 15:48:30 crc kubenswrapper[4967]: I1121 15:48:30.049868 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9sg2l\" (UniqueName: \"kubernetes.io/projected/9e1a7716-4267-41d5-8cfb-7e3f9522ef70-kube-api-access-9sg2l\") pod \"redhat-operators-b5w9n\" (UID: \"9e1a7716-4267-41d5-8cfb-7e3f9522ef70\") " pod="openshift-marketplace/redhat-operators-b5w9n" Nov 21 15:48:30 crc kubenswrapper[4967]: I1121 15:48:30.193957 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b5w9n" Nov 21 15:48:30 crc kubenswrapper[4967]: I1121 15:48:30.614842 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b5w9n"] Nov 21 15:48:30 crc kubenswrapper[4967]: I1121 15:48:30.910528 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5w9n" event={"ID":"9e1a7716-4267-41d5-8cfb-7e3f9522ef70","Type":"ContainerStarted","Data":"2e80249d65c275b8e22575d099661bb11c13aa66025315e7aa05f01836042f5f"} Nov 21 15:48:31 crc kubenswrapper[4967]: I1121 15:48:31.585390 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a45b01b9-ff37-44d9-acf1-4edbd9878c0b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a45b01b9-ff37-44d9-acf1-4edbd9878c0b" (UID: "a45b01b9-ff37-44d9-acf1-4edbd9878c0b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:48:31 crc kubenswrapper[4967]: I1121 15:48:31.641303 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45b01b9-ff37-44d9-acf1-4edbd9878c0b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:31 crc kubenswrapper[4967]: I1121 15:48:31.732727 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tqcxf"] Nov 21 15:48:31 crc kubenswrapper[4967]: I1121 15:48:31.741119 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tqcxf"] Nov 21 15:48:31 crc kubenswrapper[4967]: I1121 15:48:31.918643 4967 generic.go:334] "Generic (PLEG): container finished" podID="9e1a7716-4267-41d5-8cfb-7e3f9522ef70" containerID="fbee68e870890ced0828cb57a72cca8fa6ad9256f841ebe14a1e562b5751beaa" exitCode=0 Nov 21 15:48:31 crc kubenswrapper[4967]: I1121 15:48:31.918694 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5w9n" event={"ID":"9e1a7716-4267-41d5-8cfb-7e3f9522ef70","Type":"ContainerDied","Data":"fbee68e870890ced0828cb57a72cca8fa6ad9256f841ebe14a1e562b5751beaa"} Nov 21 15:48:32 crc kubenswrapper[4967]: I1121 15:48:32.545218 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a45b01b9-ff37-44d9-acf1-4edbd9878c0b" path="/var/lib/kubelet/pods/a45b01b9-ff37-44d9-acf1-4edbd9878c0b/volumes" Nov 21 15:48:33 crc kubenswrapper[4967]: I1121 15:48:33.934754 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5w9n" event={"ID":"9e1a7716-4267-41d5-8cfb-7e3f9522ef70","Type":"ContainerStarted","Data":"7c4bad324bf2648d99bc7bff427a9ba86d83eecf9faf91efdaa4bec1bcafb53b"} Nov 21 15:48:34 crc kubenswrapper[4967]: I1121 15:48:34.946988 4967 generic.go:334] "Generic (PLEG): container finished" podID="9e1a7716-4267-41d5-8cfb-7e3f9522ef70" 
containerID="7c4bad324bf2648d99bc7bff427a9ba86d83eecf9faf91efdaa4bec1bcafb53b" exitCode=0 Nov 21 15:48:34 crc kubenswrapper[4967]: I1121 15:48:34.947061 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5w9n" event={"ID":"9e1a7716-4267-41d5-8cfb-7e3f9522ef70","Type":"ContainerDied","Data":"7c4bad324bf2648d99bc7bff427a9ba86d83eecf9faf91efdaa4bec1bcafb53b"} Nov 21 15:48:35 crc kubenswrapper[4967]: I1121 15:48:35.959090 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5w9n" event={"ID":"9e1a7716-4267-41d5-8cfb-7e3f9522ef70","Type":"ContainerStarted","Data":"9af6727965cc97883cf4a3d889b0ec189e5bb9828ab6b1197dbc1f0e03d1c41f"} Nov 21 15:48:35 crc kubenswrapper[4967]: I1121 15:48:35.977474 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-b5w9n" podStartSLOduration=3.378391676 podStartE2EDuration="6.977455492s" podCreationTimestamp="2025-11-21 15:48:29 +0000 UTC" firstStartedPulling="2025-11-21 15:48:31.920090738 +0000 UTC m=+800.178611746" lastFinishedPulling="2025-11-21 15:48:35.519154554 +0000 UTC m=+803.777675562" observedRunningTime="2025-11-21 15:48:35.975117735 +0000 UTC m=+804.233638743" watchObservedRunningTime="2025-11-21 15:48:35.977455492 +0000 UTC m=+804.235976500" Nov 21 15:48:40 crc kubenswrapper[4967]: I1121 15:48:40.195180 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-b5w9n" Nov 21 15:48:40 crc kubenswrapper[4967]: I1121 15:48:40.196854 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-b5w9n" Nov 21 15:48:41 crc kubenswrapper[4967]: I1121 15:48:41.237838 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-b5w9n" podUID="9e1a7716-4267-41d5-8cfb-7e3f9522ef70" containerName="registry-server" probeResult="failure" output=< Nov 21 15:48:41 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 15:48:41 crc kubenswrapper[4967]: > Nov 21 15:48:42 crc kubenswrapper[4967]: I1121 15:48:42.400697 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-b7mng"] Nov 21 15:48:42 crc kubenswrapper[4967]: I1121 15:48:42.402270 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-b7mng" Nov 21 15:48:42 crc kubenswrapper[4967]: I1121 15:48:42.412114 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b7mng"] Nov 21 15:48:42 crc kubenswrapper[4967]: I1121 15:48:42.543492 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76182221-f594-45eb-9a18-3cd0a2dd25a0-utilities\") pod \"community-operators-b7mng\" (UID: \"76182221-f594-45eb-9a18-3cd0a2dd25a0\") " pod="openshift-marketplace/community-operators-b7mng" Nov 21 15:48:42 crc kubenswrapper[4967]: I1121 15:48:42.543538 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thh6x\" (UniqueName: \"kubernetes.io/projected/76182221-f594-45eb-9a18-3cd0a2dd25a0-kube-api-access-thh6x\") pod \"community-operators-b7mng\" (UID: \"76182221-f594-45eb-9a18-3cd0a2dd25a0\") " pod="openshift-marketplace/community-operators-b7mng" Nov 21 15:48:42 crc kubenswrapper[4967]: I1121 15:48:42.543573 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76182221-f594-45eb-9a18-3cd0a2dd25a0-catalog-content\") pod \"community-operators-b7mng\" (UID: \"76182221-f594-45eb-9a18-3cd0a2dd25a0\") " pod="openshift-marketplace/community-operators-b7mng" Nov 21 15:48:42 crc kubenswrapper[4967]: I1121 15:48:42.645200 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76182221-f594-45eb-9a18-3cd0a2dd25a0-utilities\") pod \"community-operators-b7mng\" (UID: \"76182221-f594-45eb-9a18-3cd0a2dd25a0\") " pod="openshift-marketplace/community-operators-b7mng" Nov 21 15:48:42 crc kubenswrapper[4967]: I1121 15:48:42.645256 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thh6x\" (UniqueName: \"kubernetes.io/projected/76182221-f594-45eb-9a18-3cd0a2dd25a0-kube-api-access-thh6x\") pod \"community-operators-b7mng\" (UID: \"76182221-f594-45eb-9a18-3cd0a2dd25a0\") " pod="openshift-marketplace/community-operators-b7mng" Nov 21 15:48:42 crc kubenswrapper[4967]: I1121 15:48:42.645296 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76182221-f594-45eb-9a18-3cd0a2dd25a0-catalog-content\") pod \"community-operators-b7mng\" (UID: \"76182221-f594-45eb-9a18-3cd0a2dd25a0\") " pod="openshift-marketplace/community-operators-b7mng" Nov 21 15:48:42 crc kubenswrapper[4967]: I1121 15:48:42.645769 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76182221-f594-45eb-9a18-3cd0a2dd25a0-utilities\") pod \"community-operators-b7mng\" (UID: \"76182221-f594-45eb-9a18-3cd0a2dd25a0\") " pod="openshift-marketplace/community-operators-b7mng" Nov 21 15:48:42 crc kubenswrapper[4967]: I1121 15:48:42.645848 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76182221-f594-45eb-9a18-3cd0a2dd25a0-catalog-content\") pod \"community-operators-b7mng\" (UID: \"76182221-f594-45eb-9a18-3cd0a2dd25a0\") " pod="openshift-marketplace/community-operators-b7mng" Nov 21 15:48:42 crc kubenswrapper[4967]: I1121 15:48:42.665915 4967 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-thh6x\" (UniqueName: \"kubernetes.io/projected/76182221-f594-45eb-9a18-3cd0a2dd25a0-kube-api-access-thh6x\") pod \"community-operators-b7mng\" (UID: \"76182221-f594-45eb-9a18-3cd0a2dd25a0\") " pod="openshift-marketplace/community-operators-b7mng" Nov 21 15:48:42 crc kubenswrapper[4967]: I1121 15:48:42.721229 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b7mng" Nov 21 15:48:43 crc kubenswrapper[4967]: I1121 15:48:43.191376 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b7mng"] Nov 21 15:48:44 crc kubenswrapper[4967]: I1121 15:48:44.012485 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b7mng" event={"ID":"76182221-f594-45eb-9a18-3cd0a2dd25a0","Type":"ContainerStarted","Data":"e85a8ce31b6b2f7ab1f202e646a71e35156fe3ed645e798332bab10d32e6ddb3"} Nov 21 15:48:46 crc kubenswrapper[4967]: I1121 15:48:46.027143 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b7mng" event={"ID":"76182221-f594-45eb-9a18-3cd0a2dd25a0","Type":"ContainerStarted","Data":"f71d380bf72a34d464fdcebcde1235bd51e8dfdd202a03e7cb41db775399d54d"} Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.035198 4967 generic.go:334] "Generic (PLEG): container finished" podID="76182221-f594-45eb-9a18-3cd0a2dd25a0" containerID="f71d380bf72a34d464fdcebcde1235bd51e8dfdd202a03e7cb41db775399d54d" exitCode=0 Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.035263 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b7mng" event={"ID":"76182221-f594-45eb-9a18-3cd0a2dd25a0","Type":"ContainerDied","Data":"f71d380bf72a34d464fdcebcde1235bd51e8dfdd202a03e7cb41db775399d54d"} Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.761537 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-4tggd"] Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.762723 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.767433 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.768745 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.768894 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-56tvw" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.769029 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.769150 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.778729 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.788153 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-4tggd"] Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.830007 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-collector-token\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.830067 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-collector-syslog-receiver\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.830136 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-entrypoint\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.830179 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-metrics\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.830206 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-tmp\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.830236 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-config\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " 
pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.830255 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-datadir\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.830293 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-trusted-ca\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.830336 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxw65\" (UniqueName: \"kubernetes.io/projected/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-kube-api-access-hxw65\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.830360 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-config-openshift-service-cacrt\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.830382 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-sa-token\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.850978 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-4tggd"] Nov 21 15:48:47 crc kubenswrapper[4967]: E1121 15:48:47.851595 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[collector-syslog-receiver collector-token config config-openshift-service-cacrt datadir entrypoint kube-api-access-hxw65 metrics sa-token tmp trusted-ca], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-logging/collector-4tggd" podUID="8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.931692 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-collector-token\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.931741 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-collector-syslog-receiver\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.931799 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: 
\"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-entrypoint\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.931833 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-metrics\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.931853 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-tmp\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.931875 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-config\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.931890 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-datadir\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.931923 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-trusted-ca\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.931944 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxw65\" (UniqueName: \"kubernetes.io/projected/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-kube-api-access-hxw65\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.931959 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-config-openshift-service-cacrt\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.931975 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-sa-token\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.932765 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-datadir\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: E1121 15:48:47.932856 4967 secret.go:188] 
Couldn't get secret openshift-logging/collector-metrics: secret "collector-metrics" not found Nov 21 15:48:47 crc kubenswrapper[4967]: E1121 15:48:47.932907 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-metrics podName:8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28 nodeName:}" failed. No retries permitted until 2025-11-21 15:48:48.43288939 +0000 UTC m=+816.691410398 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics" (UniqueName: "kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-metrics") pod "collector-4tggd" (UID: "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28") : secret "collector-metrics" not found Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.933733 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-entrypoint\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.938188 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-config\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.939870 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-config-openshift-service-cacrt\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.939933 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-trusted-ca\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.939953 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-collector-syslog-receiver\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.940031 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-collector-token\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.940535 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-tmp\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.952356 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxw65\" (UniqueName: \"kubernetes.io/projected/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-kube-api-access-hxw65\") pod \"collector-4tggd\" (UID: 
\"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:47 crc kubenswrapper[4967]: I1121 15:48:47.957204 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-sa-token\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.041198 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-4tggd" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.050589 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-4tggd" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.134299 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-config-openshift-service-cacrt\") pod \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.134385 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-collector-token\") pod \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.134417 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hxw65\" (UniqueName: \"kubernetes.io/projected/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-kube-api-access-hxw65\") pod \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.134515 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-config\") pod \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.134617 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-tmp\") pod \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.134660 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-trusted-ca\") pod \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.134760 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-collector-syslog-receiver\") pod \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.134774 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-config-openshift-service-cacrt" (OuterVolumeSpecName: 
"config-openshift-service-cacrt") pod "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28" (UID: "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28"). InnerVolumeSpecName "config-openshift-service-cacrt". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.134847 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-sa-token\") pod \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.135626 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-datadir\") pod \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.135685 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28" (UID: "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.135704 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-datadir" (OuterVolumeSpecName: "datadir") pod "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28" (UID: "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28"). InnerVolumeSpecName "datadir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.135745 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-entrypoint\") pod \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.136118 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-entrypoint" (OuterVolumeSpecName: "entrypoint") pod "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28" (UID: "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28"). InnerVolumeSpecName "entrypoint". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.135655 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-config" (OuterVolumeSpecName: "config") pod "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28" (UID: "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.136585 4967 reconciler_common.go:293] "Volume detached for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-entrypoint\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.136612 4967 reconciler_common.go:293] "Volume detached for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-config-openshift-service-cacrt\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.136630 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.136670 4967 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.136683 4967 reconciler_common.go:293] "Volume detached for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-datadir\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.138291 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-tmp" (OuterVolumeSpecName: "tmp") pod "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28" (UID: "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.138562 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-collector-syslog-receiver" (OuterVolumeSpecName: "collector-syslog-receiver") pod "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28" (UID: "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28"). InnerVolumeSpecName "collector-syslog-receiver". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.139800 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-kube-api-access-hxw65" (OuterVolumeSpecName: "kube-api-access-hxw65") pod "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28" (UID: "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28"). InnerVolumeSpecName "kube-api-access-hxw65". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.141402 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-collector-token" (OuterVolumeSpecName: "collector-token") pod "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28" (UID: "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28"). InnerVolumeSpecName "collector-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.145295 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-sa-token" (OuterVolumeSpecName: "sa-token") pod "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28" (UID: "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28"). InnerVolumeSpecName "sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.238234 4967 reconciler_common.go:293] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-tmp\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.238270 4967 reconciler_common.go:293] "Volume detached for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-collector-syslog-receiver\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.238284 4967 reconciler_common.go:293] "Volume detached for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-sa-token\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.238296 4967 reconciler_common.go:293] "Volume detached for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-collector-token\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.238319 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hxw65\" (UniqueName: \"kubernetes.io/projected/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-kube-api-access-hxw65\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.441251 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-metrics\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.448273 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-metrics\") pod \"collector-4tggd\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " pod="openshift-logging/collector-4tggd" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.542791 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-metrics\") pod \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\" (UID: \"8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28\") " Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.546764 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-metrics" (OuterVolumeSpecName: "metrics") pod "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28" (UID: "8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28"). InnerVolumeSpecName "metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:48:48 crc kubenswrapper[4967]: I1121 15:48:48.646644 4967 reconciler_common.go:293] "Volume detached for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28-metrics\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.048005 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-4tggd" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.112941 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-4tggd"] Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.122786 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-logging/collector-4tggd"] Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.144433 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-jv4x8"] Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.145547 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.148923 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.149831 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.149894 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-56tvw" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.150145 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.150382 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.154062 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-jv4x8"] Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.157801 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.255300 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/480deae0-8dd9-46d9-86fd-19cda7420bf5-tmp\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.255410 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/480deae0-8dd9-46d9-86fd-19cda7420bf5-trusted-ca\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.255510 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/480deae0-8dd9-46d9-86fd-19cda7420bf5-datadir\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.255531 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/480deae0-8dd9-46d9-86fd-19cda7420bf5-config\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.255584 4967 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/480deae0-8dd9-46d9-86fd-19cda7420bf5-metrics\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.255605 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/480deae0-8dd9-46d9-86fd-19cda7420bf5-collector-syslog-receiver\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.255630 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/480deae0-8dd9-46d9-86fd-19cda7420bf5-entrypoint\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.255661 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/480deae0-8dd9-46d9-86fd-19cda7420bf5-collector-token\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.255697 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/480deae0-8dd9-46d9-86fd-19cda7420bf5-sa-token\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.255744 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8nx4l\" (UniqueName: \"kubernetes.io/projected/480deae0-8dd9-46d9-86fd-19cda7420bf5-kube-api-access-8nx4l\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.255761 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/480deae0-8dd9-46d9-86fd-19cda7420bf5-config-openshift-service-cacrt\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.357347 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/480deae0-8dd9-46d9-86fd-19cda7420bf5-entrypoint\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.357816 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/480deae0-8dd9-46d9-86fd-19cda7420bf5-collector-token\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.357861 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" 
(UniqueName: \"kubernetes.io/projected/480deae0-8dd9-46d9-86fd-19cda7420bf5-sa-token\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.357910 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8nx4l\" (UniqueName: \"kubernetes.io/projected/480deae0-8dd9-46d9-86fd-19cda7420bf5-kube-api-access-8nx4l\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.357936 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/480deae0-8dd9-46d9-86fd-19cda7420bf5-config-openshift-service-cacrt\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.358003 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/480deae0-8dd9-46d9-86fd-19cda7420bf5-tmp\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.358038 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/480deae0-8dd9-46d9-86fd-19cda7420bf5-trusted-ca\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.358168 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/480deae0-8dd9-46d9-86fd-19cda7420bf5-entrypoint\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.358170 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/480deae0-8dd9-46d9-86fd-19cda7420bf5-datadir\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.358207 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/480deae0-8dd9-46d9-86fd-19cda7420bf5-datadir\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.358227 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/480deae0-8dd9-46d9-86fd-19cda7420bf5-config\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.358298 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/480deae0-8dd9-46d9-86fd-19cda7420bf5-metrics\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.358342 4967 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/480deae0-8dd9-46d9-86fd-19cda7420bf5-collector-syslog-receiver\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.360103 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/480deae0-8dd9-46d9-86fd-19cda7420bf5-config\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.360153 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/480deae0-8dd9-46d9-86fd-19cda7420bf5-config-openshift-service-cacrt\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.360484 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/480deae0-8dd9-46d9-86fd-19cda7420bf5-trusted-ca\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.363461 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/480deae0-8dd9-46d9-86fd-19cda7420bf5-metrics\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.371861 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/480deae0-8dd9-46d9-86fd-19cda7420bf5-tmp\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.371935 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/480deae0-8dd9-46d9-86fd-19cda7420bf5-collector-syslog-receiver\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.371952 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/480deae0-8dd9-46d9-86fd-19cda7420bf5-collector-token\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.374710 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/480deae0-8dd9-46d9-86fd-19cda7420bf5-sa-token\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.375620 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nx4l\" (UniqueName: \"kubernetes.io/projected/480deae0-8dd9-46d9-86fd-19cda7420bf5-kube-api-access-8nx4l\") pod \"collector-jv4x8\" (UID: \"480deae0-8dd9-46d9-86fd-19cda7420bf5\") " 
pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.461951 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-jv4x8" Nov 21 15:48:49 crc kubenswrapper[4967]: I1121 15:48:49.879631 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-jv4x8"] Nov 21 15:48:49 crc kubenswrapper[4967]: W1121 15:48:49.885765 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod480deae0_8dd9_46d9_86fd_19cda7420bf5.slice/crio-b573abcfa9db4aaa5ca9263fe449ff7d553367df95601a397704e607cbf12263 WatchSource:0}: Error finding container b573abcfa9db4aaa5ca9263fe449ff7d553367df95601a397704e607cbf12263: Status 404 returned error can't find the container with id b573abcfa9db4aaa5ca9263fe449ff7d553367df95601a397704e607cbf12263 Nov 21 15:48:50 crc kubenswrapper[4967]: I1121 15:48:50.057175 4967 generic.go:334] "Generic (PLEG): container finished" podID="76182221-f594-45eb-9a18-3cd0a2dd25a0" containerID="1f01b12da733f86df5463a63aaaa7d707da566a28b44b2d200408e1734e2de07" exitCode=0 Nov 21 15:48:50 crc kubenswrapper[4967]: I1121 15:48:50.057272 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b7mng" event={"ID":"76182221-f594-45eb-9a18-3cd0a2dd25a0","Type":"ContainerDied","Data":"1f01b12da733f86df5463a63aaaa7d707da566a28b44b2d200408e1734e2de07"} Nov 21 15:48:50 crc kubenswrapper[4967]: I1121 15:48:50.058657 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-jv4x8" event={"ID":"480deae0-8dd9-46d9-86fd-19cda7420bf5","Type":"ContainerStarted","Data":"b573abcfa9db4aaa5ca9263fe449ff7d553367df95601a397704e607cbf12263"} Nov 21 15:48:50 crc kubenswrapper[4967]: I1121 15:48:50.235450 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-b5w9n" Nov 21 15:48:50 crc kubenswrapper[4967]: I1121 15:48:50.281832 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-b5w9n" Nov 21 15:48:50 crc kubenswrapper[4967]: I1121 15:48:50.546046 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28" path="/var/lib/kubelet/pods/8d5ba1b6-18ad-49e9-b08f-fa9ffa1c0e28/volumes" Nov 21 15:48:51 crc kubenswrapper[4967]: I1121 15:48:51.291298 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b5w9n"] Nov 21 15:48:52 crc kubenswrapper[4967]: I1121 15:48:52.075789 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b7mng" event={"ID":"76182221-f594-45eb-9a18-3cd0a2dd25a0","Type":"ContainerStarted","Data":"5bf0aa85222322b1bb44f62ebcf20ec3f513a855f5817a2554703de6d1aca9ee"} Nov 21 15:48:52 crc kubenswrapper[4967]: I1121 15:48:52.076056 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-b5w9n" podUID="9e1a7716-4267-41d5-8cfb-7e3f9522ef70" containerName="registry-server" containerID="cri-o://9af6727965cc97883cf4a3d889b0ec189e5bb9828ab6b1197dbc1f0e03d1c41f" gracePeriod=2 Nov 21 15:48:52 crc kubenswrapper[4967]: I1121 15:48:52.100084 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-b7mng" podStartSLOduration=6.295687258 podStartE2EDuration="10.100062703s" 
podCreationTimestamp="2025-11-21 15:48:42 +0000 UTC" firstStartedPulling="2025-11-21 15:48:47.036817674 +0000 UTC m=+815.295338682" lastFinishedPulling="2025-11-21 15:48:50.841193119 +0000 UTC m=+819.099714127" observedRunningTime="2025-11-21 15:48:52.095731339 +0000 UTC m=+820.354252367" watchObservedRunningTime="2025-11-21 15:48:52.100062703 +0000 UTC m=+820.358583711" Nov 21 15:48:52 crc kubenswrapper[4967]: I1121 15:48:52.544195 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b5w9n" Nov 21 15:48:52 crc kubenswrapper[4967]: I1121 15:48:52.615132 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9sg2l\" (UniqueName: \"kubernetes.io/projected/9e1a7716-4267-41d5-8cfb-7e3f9522ef70-kube-api-access-9sg2l\") pod \"9e1a7716-4267-41d5-8cfb-7e3f9522ef70\" (UID: \"9e1a7716-4267-41d5-8cfb-7e3f9522ef70\") " Nov 21 15:48:52 crc kubenswrapper[4967]: I1121 15:48:52.615286 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e1a7716-4267-41d5-8cfb-7e3f9522ef70-utilities\") pod \"9e1a7716-4267-41d5-8cfb-7e3f9522ef70\" (UID: \"9e1a7716-4267-41d5-8cfb-7e3f9522ef70\") " Nov 21 15:48:52 crc kubenswrapper[4967]: I1121 15:48:52.615329 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e1a7716-4267-41d5-8cfb-7e3f9522ef70-catalog-content\") pod \"9e1a7716-4267-41d5-8cfb-7e3f9522ef70\" (UID: \"9e1a7716-4267-41d5-8cfb-7e3f9522ef70\") " Nov 21 15:48:52 crc kubenswrapper[4967]: I1121 15:48:52.616927 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e1a7716-4267-41d5-8cfb-7e3f9522ef70-utilities" (OuterVolumeSpecName: "utilities") pod "9e1a7716-4267-41d5-8cfb-7e3f9522ef70" (UID: "9e1a7716-4267-41d5-8cfb-7e3f9522ef70"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:48:52 crc kubenswrapper[4967]: I1121 15:48:52.636527 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e1a7716-4267-41d5-8cfb-7e3f9522ef70-kube-api-access-9sg2l" (OuterVolumeSpecName: "kube-api-access-9sg2l") pod "9e1a7716-4267-41d5-8cfb-7e3f9522ef70" (UID: "9e1a7716-4267-41d5-8cfb-7e3f9522ef70"). InnerVolumeSpecName "kube-api-access-9sg2l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:48:52 crc kubenswrapper[4967]: I1121 15:48:52.718084 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9sg2l\" (UniqueName: \"kubernetes.io/projected/9e1a7716-4267-41d5-8cfb-7e3f9522ef70-kube-api-access-9sg2l\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:52 crc kubenswrapper[4967]: I1121 15:48:52.718131 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e1a7716-4267-41d5-8cfb-7e3f9522ef70-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:52 crc kubenswrapper[4967]: I1121 15:48:52.720618 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e1a7716-4267-41d5-8cfb-7e3f9522ef70-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9e1a7716-4267-41d5-8cfb-7e3f9522ef70" (UID: "9e1a7716-4267-41d5-8cfb-7e3f9522ef70"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:48:52 crc kubenswrapper[4967]: I1121 15:48:52.721999 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-b7mng" Nov 21 15:48:52 crc kubenswrapper[4967]: I1121 15:48:52.722206 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-b7mng" Nov 21 15:48:52 crc kubenswrapper[4967]: I1121 15:48:52.820016 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e1a7716-4267-41d5-8cfb-7e3f9522ef70-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:48:53 crc kubenswrapper[4967]: I1121 15:48:53.085807 4967 generic.go:334] "Generic (PLEG): container finished" podID="9e1a7716-4267-41d5-8cfb-7e3f9522ef70" containerID="9af6727965cc97883cf4a3d889b0ec189e5bb9828ab6b1197dbc1f0e03d1c41f" exitCode=0 Nov 21 15:48:53 crc kubenswrapper[4967]: I1121 15:48:53.085884 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5w9n" event={"ID":"9e1a7716-4267-41d5-8cfb-7e3f9522ef70","Type":"ContainerDied","Data":"9af6727965cc97883cf4a3d889b0ec189e5bb9828ab6b1197dbc1f0e03d1c41f"} Nov 21 15:48:53 crc kubenswrapper[4967]: I1121 15:48:53.086021 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b5w9n" event={"ID":"9e1a7716-4267-41d5-8cfb-7e3f9522ef70","Type":"ContainerDied","Data":"2e80249d65c275b8e22575d099661bb11c13aa66025315e7aa05f01836042f5f"} Nov 21 15:48:53 crc kubenswrapper[4967]: I1121 15:48:53.086072 4967 scope.go:117] "RemoveContainer" containerID="9af6727965cc97883cf4a3d889b0ec189e5bb9828ab6b1197dbc1f0e03d1c41f" Nov 21 15:48:53 crc kubenswrapper[4967]: I1121 15:48:53.086440 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-b5w9n" Nov 21 15:48:53 crc kubenswrapper[4967]: I1121 15:48:53.102959 4967 scope.go:117] "RemoveContainer" containerID="7c4bad324bf2648d99bc7bff427a9ba86d83eecf9faf91efdaa4bec1bcafb53b" Nov 21 15:48:53 crc kubenswrapper[4967]: I1121 15:48:53.121998 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b5w9n"] Nov 21 15:48:53 crc kubenswrapper[4967]: I1121 15:48:53.124825 4967 scope.go:117] "RemoveContainer" containerID="fbee68e870890ced0828cb57a72cca8fa6ad9256f841ebe14a1e562b5751beaa" Nov 21 15:48:53 crc kubenswrapper[4967]: I1121 15:48:53.128176 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-b5w9n"] Nov 21 15:48:53 crc kubenswrapper[4967]: I1121 15:48:53.152381 4967 scope.go:117] "RemoveContainer" containerID="9af6727965cc97883cf4a3d889b0ec189e5bb9828ab6b1197dbc1f0e03d1c41f" Nov 21 15:48:53 crc kubenswrapper[4967]: E1121 15:48:53.152700 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9af6727965cc97883cf4a3d889b0ec189e5bb9828ab6b1197dbc1f0e03d1c41f\": container with ID starting with 9af6727965cc97883cf4a3d889b0ec189e5bb9828ab6b1197dbc1f0e03d1c41f not found: ID does not exist" containerID="9af6727965cc97883cf4a3d889b0ec189e5bb9828ab6b1197dbc1f0e03d1c41f" Nov 21 15:48:53 crc kubenswrapper[4967]: I1121 15:48:53.152760 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9af6727965cc97883cf4a3d889b0ec189e5bb9828ab6b1197dbc1f0e03d1c41f"} err="failed to get container status \"9af6727965cc97883cf4a3d889b0ec189e5bb9828ab6b1197dbc1f0e03d1c41f\": rpc error: code = NotFound desc = could not find container \"9af6727965cc97883cf4a3d889b0ec189e5bb9828ab6b1197dbc1f0e03d1c41f\": container with ID starting with 9af6727965cc97883cf4a3d889b0ec189e5bb9828ab6b1197dbc1f0e03d1c41f not found: ID does not exist" Nov 21 15:48:53 crc kubenswrapper[4967]: I1121 15:48:53.152806 4967 scope.go:117] "RemoveContainer" containerID="7c4bad324bf2648d99bc7bff427a9ba86d83eecf9faf91efdaa4bec1bcafb53b" Nov 21 15:48:53 crc kubenswrapper[4967]: E1121 15:48:53.153169 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c4bad324bf2648d99bc7bff427a9ba86d83eecf9faf91efdaa4bec1bcafb53b\": container with ID starting with 7c4bad324bf2648d99bc7bff427a9ba86d83eecf9faf91efdaa4bec1bcafb53b not found: ID does not exist" containerID="7c4bad324bf2648d99bc7bff427a9ba86d83eecf9faf91efdaa4bec1bcafb53b" Nov 21 15:48:53 crc kubenswrapper[4967]: I1121 15:48:53.153203 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c4bad324bf2648d99bc7bff427a9ba86d83eecf9faf91efdaa4bec1bcafb53b"} err="failed to get container status \"7c4bad324bf2648d99bc7bff427a9ba86d83eecf9faf91efdaa4bec1bcafb53b\": rpc error: code = NotFound desc = could not find container \"7c4bad324bf2648d99bc7bff427a9ba86d83eecf9faf91efdaa4bec1bcafb53b\": container with ID starting with 7c4bad324bf2648d99bc7bff427a9ba86d83eecf9faf91efdaa4bec1bcafb53b not found: ID does not exist" Nov 21 15:48:53 crc kubenswrapper[4967]: I1121 15:48:53.153226 4967 scope.go:117] "RemoveContainer" containerID="fbee68e870890ced0828cb57a72cca8fa6ad9256f841ebe14a1e562b5751beaa" Nov 21 15:48:53 crc kubenswrapper[4967]: E1121 15:48:53.153578 4967 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"fbee68e870890ced0828cb57a72cca8fa6ad9256f841ebe14a1e562b5751beaa\": container with ID starting with fbee68e870890ced0828cb57a72cca8fa6ad9256f841ebe14a1e562b5751beaa not found: ID does not exist" containerID="fbee68e870890ced0828cb57a72cca8fa6ad9256f841ebe14a1e562b5751beaa" Nov 21 15:48:53 crc kubenswrapper[4967]: I1121 15:48:53.153621 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fbee68e870890ced0828cb57a72cca8fa6ad9256f841ebe14a1e562b5751beaa"} err="failed to get container status \"fbee68e870890ced0828cb57a72cca8fa6ad9256f841ebe14a1e562b5751beaa\": rpc error: code = NotFound desc = could not find container \"fbee68e870890ced0828cb57a72cca8fa6ad9256f841ebe14a1e562b5751beaa\": container with ID starting with fbee68e870890ced0828cb57a72cca8fa6ad9256f841ebe14a1e562b5751beaa not found: ID does not exist" Nov 21 15:48:53 crc kubenswrapper[4967]: I1121 15:48:53.768584 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-b7mng" podUID="76182221-f594-45eb-9a18-3cd0a2dd25a0" containerName="registry-server" probeResult="failure" output=< Nov 21 15:48:53 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 15:48:53 crc kubenswrapper[4967]: > Nov 21 15:48:54 crc kubenswrapper[4967]: I1121 15:48:54.545906 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e1a7716-4267-41d5-8cfb-7e3f9522ef70" path="/var/lib/kubelet/pods/9e1a7716-4267-41d5-8cfb-7e3f9522ef70/volumes" Nov 21 15:49:02 crc kubenswrapper[4967]: I1121 15:49:02.763852 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-b7mng" Nov 21 15:49:02 crc kubenswrapper[4967]: I1121 15:49:02.807329 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-b7mng" Nov 21 15:49:02 crc kubenswrapper[4967]: I1121 15:49:02.995919 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b7mng"] Nov 21 15:49:04 crc kubenswrapper[4967]: I1121 15:49:04.208012 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-jv4x8" event={"ID":"480deae0-8dd9-46d9-86fd-19cda7420bf5","Type":"ContainerStarted","Data":"a3cf204a66eb6c3c8d86792cf0737b70856d76f4b8a5b5ff1e1bf2d47ae74b5f"} Nov 21 15:49:04 crc kubenswrapper[4967]: I1121 15:49:04.208160 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-b7mng" podUID="76182221-f594-45eb-9a18-3cd0a2dd25a0" containerName="registry-server" containerID="cri-o://5bf0aa85222322b1bb44f62ebcf20ec3f513a855f5817a2554703de6d1aca9ee" gracePeriod=2 Nov 21 15:49:04 crc kubenswrapper[4967]: I1121 15:49:04.234160 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/collector-jv4x8" podStartSLOduration=1.185349695 podStartE2EDuration="15.234142921s" podCreationTimestamp="2025-11-21 15:48:49 +0000 UTC" firstStartedPulling="2025-11-21 15:48:49.888405727 +0000 UTC m=+818.146926735" lastFinishedPulling="2025-11-21 15:49:03.937198953 +0000 UTC m=+832.195719961" observedRunningTime="2025-11-21 15:49:04.232828203 +0000 UTC m=+832.491349211" watchObservedRunningTime="2025-11-21 15:49:04.234142921 +0000 UTC m=+832.492663939" Nov 21 15:49:05 crc kubenswrapper[4967]: I1121 15:49:05.217587 4967 generic.go:334] "Generic 
(PLEG): container finished" podID="76182221-f594-45eb-9a18-3cd0a2dd25a0" containerID="5bf0aa85222322b1bb44f62ebcf20ec3f513a855f5817a2554703de6d1aca9ee" exitCode=0 Nov 21 15:49:05 crc kubenswrapper[4967]: I1121 15:49:05.217687 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b7mng" event={"ID":"76182221-f594-45eb-9a18-3cd0a2dd25a0","Type":"ContainerDied","Data":"5bf0aa85222322b1bb44f62ebcf20ec3f513a855f5817a2554703de6d1aca9ee"} Nov 21 15:49:05 crc kubenswrapper[4967]: I1121 15:49:05.547686 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b7mng" Nov 21 15:49:05 crc kubenswrapper[4967]: I1121 15:49:05.739825 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thh6x\" (UniqueName: \"kubernetes.io/projected/76182221-f594-45eb-9a18-3cd0a2dd25a0-kube-api-access-thh6x\") pod \"76182221-f594-45eb-9a18-3cd0a2dd25a0\" (UID: \"76182221-f594-45eb-9a18-3cd0a2dd25a0\") " Nov 21 15:49:05 crc kubenswrapper[4967]: I1121 15:49:05.739980 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76182221-f594-45eb-9a18-3cd0a2dd25a0-catalog-content\") pod \"76182221-f594-45eb-9a18-3cd0a2dd25a0\" (UID: \"76182221-f594-45eb-9a18-3cd0a2dd25a0\") " Nov 21 15:49:05 crc kubenswrapper[4967]: I1121 15:49:05.740196 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76182221-f594-45eb-9a18-3cd0a2dd25a0-utilities\") pod \"76182221-f594-45eb-9a18-3cd0a2dd25a0\" (UID: \"76182221-f594-45eb-9a18-3cd0a2dd25a0\") " Nov 21 15:49:05 crc kubenswrapper[4967]: I1121 15:49:05.741158 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76182221-f594-45eb-9a18-3cd0a2dd25a0-utilities" (OuterVolumeSpecName: "utilities") pod "76182221-f594-45eb-9a18-3cd0a2dd25a0" (UID: "76182221-f594-45eb-9a18-3cd0a2dd25a0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:49:05 crc kubenswrapper[4967]: I1121 15:49:05.746058 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76182221-f594-45eb-9a18-3cd0a2dd25a0-kube-api-access-thh6x" (OuterVolumeSpecName: "kube-api-access-thh6x") pod "76182221-f594-45eb-9a18-3cd0a2dd25a0" (UID: "76182221-f594-45eb-9a18-3cd0a2dd25a0"). InnerVolumeSpecName "kube-api-access-thh6x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:49:05 crc kubenswrapper[4967]: I1121 15:49:05.788573 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76182221-f594-45eb-9a18-3cd0a2dd25a0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "76182221-f594-45eb-9a18-3cd0a2dd25a0" (UID: "76182221-f594-45eb-9a18-3cd0a2dd25a0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:49:05 crc kubenswrapper[4967]: I1121 15:49:05.841925 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76182221-f594-45eb-9a18-3cd0a2dd25a0-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:49:05 crc kubenswrapper[4967]: I1121 15:49:05.841968 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thh6x\" (UniqueName: \"kubernetes.io/projected/76182221-f594-45eb-9a18-3cd0a2dd25a0-kube-api-access-thh6x\") on node \"crc\" DevicePath \"\"" Nov 21 15:49:05 crc kubenswrapper[4967]: I1121 15:49:05.841981 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76182221-f594-45eb-9a18-3cd0a2dd25a0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:49:06 crc kubenswrapper[4967]: I1121 15:49:06.227162 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b7mng" event={"ID":"76182221-f594-45eb-9a18-3cd0a2dd25a0","Type":"ContainerDied","Data":"e85a8ce31b6b2f7ab1f202e646a71e35156fe3ed645e798332bab10d32e6ddb3"} Nov 21 15:49:06 crc kubenswrapper[4967]: I1121 15:49:06.227225 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b7mng" Nov 21 15:49:06 crc kubenswrapper[4967]: I1121 15:49:06.227260 4967 scope.go:117] "RemoveContainer" containerID="5bf0aa85222322b1bb44f62ebcf20ec3f513a855f5817a2554703de6d1aca9ee" Nov 21 15:49:06 crc kubenswrapper[4967]: I1121 15:49:06.249346 4967 scope.go:117] "RemoveContainer" containerID="1f01b12da733f86df5463a63aaaa7d707da566a28b44b2d200408e1734e2de07" Nov 21 15:49:06 crc kubenswrapper[4967]: I1121 15:49:06.256182 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b7mng"] Nov 21 15:49:06 crc kubenswrapper[4967]: I1121 15:49:06.261104 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-b7mng"] Nov 21 15:49:06 crc kubenswrapper[4967]: I1121 15:49:06.294970 4967 scope.go:117] "RemoveContainer" containerID="f71d380bf72a34d464fdcebcde1235bd51e8dfdd202a03e7cb41db775399d54d" Nov 21 15:49:06 crc kubenswrapper[4967]: I1121 15:49:06.545493 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76182221-f594-45eb-9a18-3cd0a2dd25a0" path="/var/lib/kubelet/pods/76182221-f594-45eb-9a18-3cd0a2dd25a0/volumes" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.391537 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8"] Nov 21 15:49:26 crc kubenswrapper[4967]: E1121 15:49:26.393245 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76182221-f594-45eb-9a18-3cd0a2dd25a0" containerName="registry-server" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.393331 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="76182221-f594-45eb-9a18-3cd0a2dd25a0" containerName="registry-server" Nov 21 15:49:26 crc kubenswrapper[4967]: E1121 15:49:26.393403 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e1a7716-4267-41d5-8cfb-7e3f9522ef70" containerName="registry-server" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.393463 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e1a7716-4267-41d5-8cfb-7e3f9522ef70" containerName="registry-server" Nov 21 15:49:26 crc 
kubenswrapper[4967]: E1121 15:49:26.393526 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e1a7716-4267-41d5-8cfb-7e3f9522ef70" containerName="extract-utilities" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.393591 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e1a7716-4267-41d5-8cfb-7e3f9522ef70" containerName="extract-utilities" Nov 21 15:49:26 crc kubenswrapper[4967]: E1121 15:49:26.393660 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76182221-f594-45eb-9a18-3cd0a2dd25a0" containerName="extract-utilities" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.393718 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="76182221-f594-45eb-9a18-3cd0a2dd25a0" containerName="extract-utilities" Nov 21 15:49:26 crc kubenswrapper[4967]: E1121 15:49:26.393777 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e1a7716-4267-41d5-8cfb-7e3f9522ef70" containerName="extract-content" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.393834 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e1a7716-4267-41d5-8cfb-7e3f9522ef70" containerName="extract-content" Nov 21 15:49:26 crc kubenswrapper[4967]: E1121 15:49:26.393889 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76182221-f594-45eb-9a18-3cd0a2dd25a0" containerName="extract-content" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.393938 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="76182221-f594-45eb-9a18-3cd0a2dd25a0" containerName="extract-content" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.394132 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e1a7716-4267-41d5-8cfb-7e3f9522ef70" containerName="registry-server" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.394195 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="76182221-f594-45eb-9a18-3cd0a2dd25a0" containerName="registry-server" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.408535 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8"] Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.408761 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.410835 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.559134 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/646e86eb-f5d8-43c0-9d54-8fdb55418f0d-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8\" (UID: \"646e86eb-f5d8-43c0-9d54-8fdb55418f0d\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.559589 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8c5w2\" (UniqueName: \"kubernetes.io/projected/646e86eb-f5d8-43c0-9d54-8fdb55418f0d-kube-api-access-8c5w2\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8\" (UID: \"646e86eb-f5d8-43c0-9d54-8fdb55418f0d\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.559631 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/646e86eb-f5d8-43c0-9d54-8fdb55418f0d-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8\" (UID: \"646e86eb-f5d8-43c0-9d54-8fdb55418f0d\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.661185 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/646e86eb-f5d8-43c0-9d54-8fdb55418f0d-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8\" (UID: \"646e86eb-f5d8-43c0-9d54-8fdb55418f0d\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.661252 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8c5w2\" (UniqueName: \"kubernetes.io/projected/646e86eb-f5d8-43c0-9d54-8fdb55418f0d-kube-api-access-8c5w2\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8\" (UID: \"646e86eb-f5d8-43c0-9d54-8fdb55418f0d\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.661289 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/646e86eb-f5d8-43c0-9d54-8fdb55418f0d-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8\" (UID: \"646e86eb-f5d8-43c0-9d54-8fdb55418f0d\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.662359 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/646e86eb-f5d8-43c0-9d54-8fdb55418f0d-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8\" (UID: \"646e86eb-f5d8-43c0-9d54-8fdb55418f0d\") " 
pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.662587 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/646e86eb-f5d8-43c0-9d54-8fdb55418f0d-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8\" (UID: \"646e86eb-f5d8-43c0-9d54-8fdb55418f0d\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.690423 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8c5w2\" (UniqueName: \"kubernetes.io/projected/646e86eb-f5d8-43c0-9d54-8fdb55418f0d-kube-api-access-8c5w2\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8\" (UID: \"646e86eb-f5d8-43c0-9d54-8fdb55418f0d\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" Nov 21 15:49:26 crc kubenswrapper[4967]: I1121 15:49:26.735901 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" Nov 21 15:49:27 crc kubenswrapper[4967]: I1121 15:49:27.165929 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8"] Nov 21 15:49:27 crc kubenswrapper[4967]: I1121 15:49:27.365206 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" event={"ID":"646e86eb-f5d8-43c0-9d54-8fdb55418f0d","Type":"ContainerStarted","Data":"dcf963a1772c0e3f5411ca603d450862b532c41c4511fb041b312752b881217c"} Nov 21 15:49:28 crc kubenswrapper[4967]: I1121 15:49:28.372908 4967 generic.go:334] "Generic (PLEG): container finished" podID="646e86eb-f5d8-43c0-9d54-8fdb55418f0d" containerID="37dc7818f665f44e9c1e5f923a1aac55639ecdf11ff3596e2b858c3217f42e95" exitCode=0 Nov 21 15:49:28 crc kubenswrapper[4967]: I1121 15:49:28.373016 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" event={"ID":"646e86eb-f5d8-43c0-9d54-8fdb55418f0d","Type":"ContainerDied","Data":"37dc7818f665f44e9c1e5f923a1aac55639ecdf11ff3596e2b858c3217f42e95"} Nov 21 15:49:32 crc kubenswrapper[4967]: I1121 15:49:32.404672 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" event={"ID":"646e86eb-f5d8-43c0-9d54-8fdb55418f0d","Type":"ContainerStarted","Data":"e17dbdf6d766c9f757c289b168134571f0db958b5cdcd8cb665a5620e0896679"} Nov 21 15:49:33 crc kubenswrapper[4967]: I1121 15:49:33.414611 4967 generic.go:334] "Generic (PLEG): container finished" podID="646e86eb-f5d8-43c0-9d54-8fdb55418f0d" containerID="e17dbdf6d766c9f757c289b168134571f0db958b5cdcd8cb665a5620e0896679" exitCode=0 Nov 21 15:49:33 crc kubenswrapper[4967]: I1121 15:49:33.414663 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" event={"ID":"646e86eb-f5d8-43c0-9d54-8fdb55418f0d","Type":"ContainerDied","Data":"e17dbdf6d766c9f757c289b168134571f0db958b5cdcd8cb665a5620e0896679"} Nov 21 15:49:34 crc kubenswrapper[4967]: I1121 15:49:34.423167 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" event={"ID":"646e86eb-f5d8-43c0-9d54-8fdb55418f0d","Type":"ContainerStarted","Data":"ab54ae83eeb46863eb2fdbb52d77b4f45ac7c13ed19b0e99c05d24188e7cce1f"} Nov 21 15:49:35 crc kubenswrapper[4967]: I1121 15:49:35.437195 4967 generic.go:334] "Generic (PLEG): container finished" podID="646e86eb-f5d8-43c0-9d54-8fdb55418f0d" containerID="ab54ae83eeb46863eb2fdbb52d77b4f45ac7c13ed19b0e99c05d24188e7cce1f" exitCode=0 Nov 21 15:49:35 crc kubenswrapper[4967]: I1121 15:49:35.437255 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" event={"ID":"646e86eb-f5d8-43c0-9d54-8fdb55418f0d","Type":"ContainerDied","Data":"ab54ae83eeb46863eb2fdbb52d77b4f45ac7c13ed19b0e99c05d24188e7cce1f"} Nov 21 15:49:36 crc kubenswrapper[4967]: I1121 15:49:36.797825 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" Nov 21 15:49:36 crc kubenswrapper[4967]: I1121 15:49:36.973545 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8c5w2\" (UniqueName: \"kubernetes.io/projected/646e86eb-f5d8-43c0-9d54-8fdb55418f0d-kube-api-access-8c5w2\") pod \"646e86eb-f5d8-43c0-9d54-8fdb55418f0d\" (UID: \"646e86eb-f5d8-43c0-9d54-8fdb55418f0d\") " Nov 21 15:49:36 crc kubenswrapper[4967]: I1121 15:49:36.973866 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/646e86eb-f5d8-43c0-9d54-8fdb55418f0d-util\") pod \"646e86eb-f5d8-43c0-9d54-8fdb55418f0d\" (UID: \"646e86eb-f5d8-43c0-9d54-8fdb55418f0d\") " Nov 21 15:49:36 crc kubenswrapper[4967]: I1121 15:49:36.973981 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/646e86eb-f5d8-43c0-9d54-8fdb55418f0d-bundle\") pod \"646e86eb-f5d8-43c0-9d54-8fdb55418f0d\" (UID: \"646e86eb-f5d8-43c0-9d54-8fdb55418f0d\") " Nov 21 15:49:36 crc kubenswrapper[4967]: I1121 15:49:36.974424 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/646e86eb-f5d8-43c0-9d54-8fdb55418f0d-bundle" (OuterVolumeSpecName: "bundle") pod "646e86eb-f5d8-43c0-9d54-8fdb55418f0d" (UID: "646e86eb-f5d8-43c0-9d54-8fdb55418f0d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:49:36 crc kubenswrapper[4967]: I1121 15:49:36.974602 4967 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/646e86eb-f5d8-43c0-9d54-8fdb55418f0d-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:49:36 crc kubenswrapper[4967]: I1121 15:49:36.985172 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/646e86eb-f5d8-43c0-9d54-8fdb55418f0d-util" (OuterVolumeSpecName: "util") pod "646e86eb-f5d8-43c0-9d54-8fdb55418f0d" (UID: "646e86eb-f5d8-43c0-9d54-8fdb55418f0d"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:49:36 crc kubenswrapper[4967]: I1121 15:49:36.987473 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/646e86eb-f5d8-43c0-9d54-8fdb55418f0d-kube-api-access-8c5w2" (OuterVolumeSpecName: "kube-api-access-8c5w2") pod "646e86eb-f5d8-43c0-9d54-8fdb55418f0d" (UID: "646e86eb-f5d8-43c0-9d54-8fdb55418f0d"). InnerVolumeSpecName "kube-api-access-8c5w2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:49:37 crc kubenswrapper[4967]: I1121 15:49:37.075754 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8c5w2\" (UniqueName: \"kubernetes.io/projected/646e86eb-f5d8-43c0-9d54-8fdb55418f0d-kube-api-access-8c5w2\") on node \"crc\" DevicePath \"\"" Nov 21 15:49:37 crc kubenswrapper[4967]: I1121 15:49:37.075790 4967 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/646e86eb-f5d8-43c0-9d54-8fdb55418f0d-util\") on node \"crc\" DevicePath \"\"" Nov 21 15:49:37 crc kubenswrapper[4967]: I1121 15:49:37.452681 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" event={"ID":"646e86eb-f5d8-43c0-9d54-8fdb55418f0d","Type":"ContainerDied","Data":"dcf963a1772c0e3f5411ca603d450862b532c41c4511fb041b312752b881217c"} Nov 21 15:49:37 crc kubenswrapper[4967]: I1121 15:49:37.453006 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dcf963a1772c0e3f5411ca603d450862b532c41c4511fb041b312752b881217c" Nov 21 15:49:37 crc kubenswrapper[4967]: I1121 15:49:37.452763 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8" Nov 21 15:49:43 crc kubenswrapper[4967]: I1121 15:49:43.154842 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-sv9h6"] Nov 21 15:49:43 crc kubenswrapper[4967]: E1121 15:49:43.155677 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="646e86eb-f5d8-43c0-9d54-8fdb55418f0d" containerName="util" Nov 21 15:49:43 crc kubenswrapper[4967]: I1121 15:49:43.155696 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="646e86eb-f5d8-43c0-9d54-8fdb55418f0d" containerName="util" Nov 21 15:49:43 crc kubenswrapper[4967]: E1121 15:49:43.155718 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="646e86eb-f5d8-43c0-9d54-8fdb55418f0d" containerName="extract" Nov 21 15:49:43 crc kubenswrapper[4967]: I1121 15:49:43.155726 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="646e86eb-f5d8-43c0-9d54-8fdb55418f0d" containerName="extract" Nov 21 15:49:43 crc kubenswrapper[4967]: E1121 15:49:43.155744 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="646e86eb-f5d8-43c0-9d54-8fdb55418f0d" containerName="pull" Nov 21 15:49:43 crc kubenswrapper[4967]: I1121 15:49:43.155751 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="646e86eb-f5d8-43c0-9d54-8fdb55418f0d" containerName="pull" Nov 21 15:49:43 crc kubenswrapper[4967]: I1121 15:49:43.155879 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="646e86eb-f5d8-43c0-9d54-8fdb55418f0d" containerName="extract" Nov 21 15:49:43 crc kubenswrapper[4967]: I1121 15:49:43.156476 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-sv9h6" Nov 21 15:49:43 crc kubenswrapper[4967]: I1121 15:49:43.159009 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-4g4xb" Nov 21 15:49:43 crc kubenswrapper[4967]: I1121 15:49:43.159239 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 21 15:49:43 crc kubenswrapper[4967]: I1121 15:49:43.164432 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 21 15:49:43 crc kubenswrapper[4967]: I1121 15:49:43.169969 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cwt2\" (UniqueName: \"kubernetes.io/projected/d478a29a-6695-471a-b25d-d5c34c6cd916-kube-api-access-7cwt2\") pod \"nmstate-operator-557fdffb88-sv9h6\" (UID: \"d478a29a-6695-471a-b25d-d5c34c6cd916\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-sv9h6" Nov 21 15:49:43 crc kubenswrapper[4967]: I1121 15:49:43.172404 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-sv9h6"] Nov 21 15:49:43 crc kubenswrapper[4967]: I1121 15:49:43.271264 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cwt2\" (UniqueName: \"kubernetes.io/projected/d478a29a-6695-471a-b25d-d5c34c6cd916-kube-api-access-7cwt2\") pod \"nmstate-operator-557fdffb88-sv9h6\" (UID: \"d478a29a-6695-471a-b25d-d5c34c6cd916\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-sv9h6" Nov 21 15:49:43 crc kubenswrapper[4967]: I1121 15:49:43.305789 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cwt2\" (UniqueName: \"kubernetes.io/projected/d478a29a-6695-471a-b25d-d5c34c6cd916-kube-api-access-7cwt2\") pod \"nmstate-operator-557fdffb88-sv9h6\" (UID: \"d478a29a-6695-471a-b25d-d5c34c6cd916\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-sv9h6" Nov 21 15:49:43 crc kubenswrapper[4967]: I1121 15:49:43.497190 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-sv9h6" Nov 21 15:49:43 crc kubenswrapper[4967]: I1121 15:49:43.944368 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-sv9h6"] Nov 21 15:49:43 crc kubenswrapper[4967]: W1121 15:49:43.950455 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd478a29a_6695_471a_b25d_d5c34c6cd916.slice/crio-b69b17d208e8e5313bfab95dcd1081d913a1caacb204943d688f9e86ae87476f WatchSource:0}: Error finding container b69b17d208e8e5313bfab95dcd1081d913a1caacb204943d688f9e86ae87476f: Status 404 returned error can't find the container with id b69b17d208e8e5313bfab95dcd1081d913a1caacb204943d688f9e86ae87476f Nov 21 15:49:44 crc kubenswrapper[4967]: I1121 15:49:44.511653 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-sv9h6" event={"ID":"d478a29a-6695-471a-b25d-d5c34c6cd916","Type":"ContainerStarted","Data":"b69b17d208e8e5313bfab95dcd1081d913a1caacb204943d688f9e86ae87476f"} Nov 21 15:49:47 crc kubenswrapper[4967]: I1121 15:49:47.533503 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-sv9h6" event={"ID":"d478a29a-6695-471a-b25d-d5c34c6cd916","Type":"ContainerStarted","Data":"60922c3b6c5480aa073912eff077882dfd9a3c53d876c71ea023f7b034ae6ce3"} Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.520356 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-sv9h6" podStartSLOduration=2.336873145 podStartE2EDuration="5.520339058s" podCreationTimestamp="2025-11-21 15:49:43 +0000 UTC" firstStartedPulling="2025-11-21 15:49:43.951963355 +0000 UTC m=+872.210484363" lastFinishedPulling="2025-11-21 15:49:47.135429268 +0000 UTC m=+875.393950276" observedRunningTime="2025-11-21 15:49:47.555875705 +0000 UTC m=+875.814396703" watchObservedRunningTime="2025-11-21 15:49:48.520339058 +0000 UTC m=+876.778860066" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.525041 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-fgnlx"] Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.526161 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fgnlx" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.528867 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-2ptqw" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.551194 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-fgnlx"] Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.571346 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-v5hpv"] Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.574439 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-v5hpv" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.583608 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-v5hpv"] Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.585996 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.622439 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-krp57"] Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.623604 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-krp57" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.676255 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mrgl\" (UniqueName: \"kubernetes.io/projected/c50f20c0-a383-4f6b-bfb2-407d0311697e-kube-api-access-5mrgl\") pod \"nmstate-webhook-6b89b748d8-v5hpv\" (UID: \"c50f20c0-a383-4f6b-bfb2-407d0311697e\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-v5hpv" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.676394 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hv6cn\" (UniqueName: \"kubernetes.io/projected/8eecf12e-205c-45b3-8be3-84dd5d0c6803-kube-api-access-hv6cn\") pod \"nmstate-metrics-5dcf9c57c5-fgnlx\" (UID: \"8eecf12e-205c-45b3-8be3-84dd5d0c6803\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fgnlx" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.676472 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/c50f20c0-a383-4f6b-bfb2-407d0311697e-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-v5hpv\" (UID: \"c50f20c0-a383-4f6b-bfb2-407d0311697e\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-v5hpv" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.721527 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cd9cs"] Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.726805 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cd9cs" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.728676 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-2jmxw" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.729004 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.736743 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.742556 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cd9cs"] Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.778006 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/b669d096-a783-4c35-9bd5-a489346af9d8-ovs-socket\") pod \"nmstate-handler-krp57\" (UID: \"b669d096-a783-4c35-9bd5-a489346af9d8\") " pod="openshift-nmstate/nmstate-handler-krp57" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.778077 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hv6cn\" (UniqueName: \"kubernetes.io/projected/8eecf12e-205c-45b3-8be3-84dd5d0c6803-kube-api-access-hv6cn\") pod \"nmstate-metrics-5dcf9c57c5-fgnlx\" (UID: \"8eecf12e-205c-45b3-8be3-84dd5d0c6803\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fgnlx" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.778162 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2hzd\" (UniqueName: \"kubernetes.io/projected/b669d096-a783-4c35-9bd5-a489346af9d8-kube-api-access-h2hzd\") pod \"nmstate-handler-krp57\" (UID: \"b669d096-a783-4c35-9bd5-a489346af9d8\") " pod="openshift-nmstate/nmstate-handler-krp57" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.778184 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/c50f20c0-a383-4f6b-bfb2-407d0311697e-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-v5hpv\" (UID: \"c50f20c0-a383-4f6b-bfb2-407d0311697e\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-v5hpv" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.778204 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/b669d096-a783-4c35-9bd5-a489346af9d8-dbus-socket\") pod \"nmstate-handler-krp57\" (UID: \"b669d096-a783-4c35-9bd5-a489346af9d8\") " pod="openshift-nmstate/nmstate-handler-krp57" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.778254 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/b669d096-a783-4c35-9bd5-a489346af9d8-nmstate-lock\") pod \"nmstate-handler-krp57\" (UID: \"b669d096-a783-4c35-9bd5-a489346af9d8\") " pod="openshift-nmstate/nmstate-handler-krp57" Nov 21 15:49:48 crc kubenswrapper[4967]: E1121 15:49:48.778903 4967 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.778933 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-5mrgl\" (UniqueName: \"kubernetes.io/projected/c50f20c0-a383-4f6b-bfb2-407d0311697e-kube-api-access-5mrgl\") pod \"nmstate-webhook-6b89b748d8-v5hpv\" (UID: \"c50f20c0-a383-4f6b-bfb2-407d0311697e\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-v5hpv" Nov 21 15:49:48 crc kubenswrapper[4967]: E1121 15:49:48.778957 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c50f20c0-a383-4f6b-bfb2-407d0311697e-tls-key-pair podName:c50f20c0-a383-4f6b-bfb2-407d0311697e nodeName:}" failed. No retries permitted until 2025-11-21 15:49:49.27894053 +0000 UTC m=+877.537461538 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/c50f20c0-a383-4f6b-bfb2-407d0311697e-tls-key-pair") pod "nmstate-webhook-6b89b748d8-v5hpv" (UID: "c50f20c0-a383-4f6b-bfb2-407d0311697e") : secret "openshift-nmstate-webhook" not found Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.800616 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hv6cn\" (UniqueName: \"kubernetes.io/projected/8eecf12e-205c-45b3-8be3-84dd5d0c6803-kube-api-access-hv6cn\") pod \"nmstate-metrics-5dcf9c57c5-fgnlx\" (UID: \"8eecf12e-205c-45b3-8be3-84dd5d0c6803\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fgnlx" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.801195 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mrgl\" (UniqueName: \"kubernetes.io/projected/c50f20c0-a383-4f6b-bfb2-407d0311697e-kube-api-access-5mrgl\") pod \"nmstate-webhook-6b89b748d8-v5hpv\" (UID: \"c50f20c0-a383-4f6b-bfb2-407d0311697e\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-v5hpv" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.880724 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/b669d096-a783-4c35-9bd5-a489346af9d8-nmstate-lock\") pod \"nmstate-handler-krp57\" (UID: \"b669d096-a783-4c35-9bd5-a489346af9d8\") " pod="openshift-nmstate/nmstate-handler-krp57" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.880800 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrzmb\" (UniqueName: \"kubernetes.io/projected/2a407648-607f-4dc4-a6b6-7ae364ae228b-kube-api-access-wrzmb\") pod \"nmstate-console-plugin-5874bd7bc5-cd9cs\" (UID: \"2a407648-607f-4dc4-a6b6-7ae364ae228b\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cd9cs" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.880873 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/b669d096-a783-4c35-9bd5-a489346af9d8-ovs-socket\") pod \"nmstate-handler-krp57\" (UID: \"b669d096-a783-4c35-9bd5-a489346af9d8\") " pod="openshift-nmstate/nmstate-handler-krp57" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.880909 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/b669d096-a783-4c35-9bd5-a489346af9d8-ovs-socket\") pod \"nmstate-handler-krp57\" (UID: \"b669d096-a783-4c35-9bd5-a489346af9d8\") " pod="openshift-nmstate/nmstate-handler-krp57" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.880871 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: 
\"kubernetes.io/host-path/b669d096-a783-4c35-9bd5-a489346af9d8-nmstate-lock\") pod \"nmstate-handler-krp57\" (UID: \"b669d096-a783-4c35-9bd5-a489346af9d8\") " pod="openshift-nmstate/nmstate-handler-krp57" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.880996 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/2a407648-607f-4dc4-a6b6-7ae364ae228b-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-cd9cs\" (UID: \"2a407648-607f-4dc4-a6b6-7ae364ae228b\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cd9cs" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.881033 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/2a407648-607f-4dc4-a6b6-7ae364ae228b-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-cd9cs\" (UID: \"2a407648-607f-4dc4-a6b6-7ae364ae228b\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cd9cs" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.881149 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2hzd\" (UniqueName: \"kubernetes.io/projected/b669d096-a783-4c35-9bd5-a489346af9d8-kube-api-access-h2hzd\") pod \"nmstate-handler-krp57\" (UID: \"b669d096-a783-4c35-9bd5-a489346af9d8\") " pod="openshift-nmstate/nmstate-handler-krp57" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.881204 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/b669d096-a783-4c35-9bd5-a489346af9d8-dbus-socket\") pod \"nmstate-handler-krp57\" (UID: \"b669d096-a783-4c35-9bd5-a489346af9d8\") " pod="openshift-nmstate/nmstate-handler-krp57" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.881609 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/b669d096-a783-4c35-9bd5-a489346af9d8-dbus-socket\") pod \"nmstate-handler-krp57\" (UID: \"b669d096-a783-4c35-9bd5-a489346af9d8\") " pod="openshift-nmstate/nmstate-handler-krp57" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.887288 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fgnlx" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.904989 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2hzd\" (UniqueName: \"kubernetes.io/projected/b669d096-a783-4c35-9bd5-a489346af9d8-kube-api-access-h2hzd\") pod \"nmstate-handler-krp57\" (UID: \"b669d096-a783-4c35-9bd5-a489346af9d8\") " pod="openshift-nmstate/nmstate-handler-krp57" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.949145 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-krp57" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.950750 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-7f56c8cd-xplcm"] Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.960462 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.973743 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7f56c8cd-xplcm"] Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.983282 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5a8a3edb-6fe7-4597-8e99-2ac664634b00-console-oauth-config\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.983343 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/2a407648-607f-4dc4-a6b6-7ae364ae228b-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-cd9cs\" (UID: \"2a407648-607f-4dc4-a6b6-7ae364ae228b\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cd9cs" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.983365 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-console-config\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.983451 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/2a407648-607f-4dc4-a6b6-7ae364ae228b-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-cd9cs\" (UID: \"2a407648-607f-4dc4-a6b6-7ae364ae228b\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cd9cs" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.983475 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-service-ca\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.983499 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-trusted-ca-bundle\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.983544 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvdb4\" (UniqueName: \"kubernetes.io/projected/5a8a3edb-6fe7-4597-8e99-2ac664634b00-kube-api-access-gvdb4\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.983579 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5a8a3edb-6fe7-4597-8e99-2ac664634b00-console-serving-cert\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:48 crc 
kubenswrapper[4967]: I1121 15:49:48.983597 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrzmb\" (UniqueName: \"kubernetes.io/projected/2a407648-607f-4dc4-a6b6-7ae364ae228b-kube-api-access-wrzmb\") pod \"nmstate-console-plugin-5874bd7bc5-cd9cs\" (UID: \"2a407648-607f-4dc4-a6b6-7ae364ae228b\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cd9cs" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.983617 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-oauth-serving-cert\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:48 crc kubenswrapper[4967]: I1121 15:49:48.986967 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/2a407648-607f-4dc4-a6b6-7ae364ae228b-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-cd9cs\" (UID: \"2a407648-607f-4dc4-a6b6-7ae364ae228b\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cd9cs" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.006010 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/2a407648-607f-4dc4-a6b6-7ae364ae228b-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-cd9cs\" (UID: \"2a407648-607f-4dc4-a6b6-7ae364ae228b\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cd9cs" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.019700 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrzmb\" (UniqueName: \"kubernetes.io/projected/2a407648-607f-4dc4-a6b6-7ae364ae228b-kube-api-access-wrzmb\") pod \"nmstate-console-plugin-5874bd7bc5-cd9cs\" (UID: \"2a407648-607f-4dc4-a6b6-7ae364ae228b\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cd9cs" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.043937 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cd9cs" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.085210 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5a8a3edb-6fe7-4597-8e99-2ac664634b00-console-oauth-config\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.085266 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-console-config\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.085298 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-service-ca\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.085340 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-trusted-ca-bundle\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.085403 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvdb4\" (UniqueName: \"kubernetes.io/projected/5a8a3edb-6fe7-4597-8e99-2ac664634b00-kube-api-access-gvdb4\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.085443 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5a8a3edb-6fe7-4597-8e99-2ac664634b00-console-serving-cert\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.085466 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-oauth-serving-cert\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.087143 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-console-config\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.090945 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-oauth-serving-cert\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " 
pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.093130 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5a8a3edb-6fe7-4597-8e99-2ac664634b00-console-oauth-config\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.093828 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-service-ca\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.098739 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5a8a3edb-6fe7-4597-8e99-2ac664634b00-console-serving-cert\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.099516 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-trusted-ca-bundle\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.123019 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvdb4\" (UniqueName: \"kubernetes.io/projected/5a8a3edb-6fe7-4597-8e99-2ac664634b00-kube-api-access-gvdb4\") pod \"console-7f56c8cd-xplcm\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") " pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.289211 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/c50f20c0-a383-4f6b-bfb2-407d0311697e-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-v5hpv\" (UID: \"c50f20c0-a383-4f6b-bfb2-407d0311697e\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-v5hpv" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.294444 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/c50f20c0-a383-4f6b-bfb2-407d0311697e-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-v5hpv\" (UID: \"c50f20c0-a383-4f6b-bfb2-407d0311697e\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-v5hpv" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.370042 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.447613 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-fgnlx"] Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.505816 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-v5hpv" Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.581588 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-krp57" event={"ID":"b669d096-a783-4c35-9bd5-a489346af9d8","Type":"ContainerStarted","Data":"91d1dc76991759955477f1bb725c4ffe4eb8e2f620e333dba8a80246cfb9c8b5"} Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.584468 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fgnlx" event={"ID":"8eecf12e-205c-45b3-8be3-84dd5d0c6803","Type":"ContainerStarted","Data":"8ed6e9cd3f3cd0c2add24ab82b5fa037351b450c4bfcb21db0215e48033a6e9e"} Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.605168 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cd9cs"] Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.868922 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-7f56c8cd-xplcm"] Nov 21 15:49:49 crc kubenswrapper[4967]: I1121 15:49:49.961854 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-v5hpv"] Nov 21 15:49:49 crc kubenswrapper[4967]: W1121 15:49:49.964152 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc50f20c0_a383_4f6b_bfb2_407d0311697e.slice/crio-ef3713f8f588c0aa00d0de41878ee52830393b938edaf53dddc69f7a7788a5de WatchSource:0}: Error finding container ef3713f8f588c0aa00d0de41878ee52830393b938edaf53dddc69f7a7788a5de: Status 404 returned error can't find the container with id ef3713f8f588c0aa00d0de41878ee52830393b938edaf53dddc69f7a7788a5de Nov 21 15:49:50 crc kubenswrapper[4967]: I1121 15:49:50.591636 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-v5hpv" event={"ID":"c50f20c0-a383-4f6b-bfb2-407d0311697e","Type":"ContainerStarted","Data":"ef3713f8f588c0aa00d0de41878ee52830393b938edaf53dddc69f7a7788a5de"} Nov 21 15:49:50 crc kubenswrapper[4967]: I1121 15:49:50.593300 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cd9cs" event={"ID":"2a407648-607f-4dc4-a6b6-7ae364ae228b","Type":"ContainerStarted","Data":"a9427b1a7fac51e58a7a818bfa6eba5e0192a8ca330b2f7ac49c1618623d0225"} Nov 21 15:49:50 crc kubenswrapper[4967]: I1121 15:49:50.595100 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7f56c8cd-xplcm" event={"ID":"5a8a3edb-6fe7-4597-8e99-2ac664634b00","Type":"ContainerStarted","Data":"bef07153af84578649ad213b08e3aff50b05f0d93e070ddd2aa06d5097230f2c"} Nov 21 15:49:50 crc kubenswrapper[4967]: I1121 15:49:50.595154 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7f56c8cd-xplcm" event={"ID":"5a8a3edb-6fe7-4597-8e99-2ac664634b00","Type":"ContainerStarted","Data":"0d4114c86c121ccb89dc273650be76bf3640839d64ee12913c2cb17e31698e56"} Nov 21 15:49:50 crc kubenswrapper[4967]: I1121 15:49:50.623165 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-7f56c8cd-xplcm" podStartSLOduration=2.623147663 podStartE2EDuration="2.623147663s" podCreationTimestamp="2025-11-21 15:49:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:49:50.619078496 +0000 UTC 
m=+878.877599514" watchObservedRunningTime="2025-11-21 15:49:50.623147663 +0000 UTC m=+878.881668681" Nov 21 15:49:52 crc kubenswrapper[4967]: I1121 15:49:52.627080 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fgnlx" event={"ID":"8eecf12e-205c-45b3-8be3-84dd5d0c6803","Type":"ContainerStarted","Data":"b7d0a16f7d327ed69069290ce33c609d46e6dda76286479a7030a2336173e796"} Nov 21 15:49:52 crc kubenswrapper[4967]: I1121 15:49:52.630126 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-v5hpv" event={"ID":"c50f20c0-a383-4f6b-bfb2-407d0311697e","Type":"ContainerStarted","Data":"1b693ff7d55244539aaf6a47fe9b40f09bd42b2cbf820111716bf068334053e0"} Nov 21 15:49:52 crc kubenswrapper[4967]: I1121 15:49:52.630273 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-v5hpv" Nov 21 15:49:52 crc kubenswrapper[4967]: I1121 15:49:52.632646 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cd9cs" event={"ID":"2a407648-607f-4dc4-a6b6-7ae364ae228b","Type":"ContainerStarted","Data":"8770d50d3f3dbd46de7e7f4d59938043a4ce9c47551b195de56236ca068d67d7"} Nov 21 15:49:52 crc kubenswrapper[4967]: I1121 15:49:52.634175 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-krp57" event={"ID":"b669d096-a783-4c35-9bd5-a489346af9d8","Type":"ContainerStarted","Data":"42edee8f548cf7ca0affcf7659a73d295c1a61b8dd6c914123272f3d51a6a8ad"} Nov 21 15:49:52 crc kubenswrapper[4967]: I1121 15:49:52.634355 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-krp57" Nov 21 15:49:52 crc kubenswrapper[4967]: I1121 15:49:52.648770 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-v5hpv" podStartSLOduration=2.372087028 podStartE2EDuration="4.648751993s" podCreationTimestamp="2025-11-21 15:49:48 +0000 UTC" firstStartedPulling="2025-11-21 15:49:49.966438895 +0000 UTC m=+878.224959903" lastFinishedPulling="2025-11-21 15:49:52.24310386 +0000 UTC m=+880.501624868" observedRunningTime="2025-11-21 15:49:52.643565284 +0000 UTC m=+880.902086292" watchObservedRunningTime="2025-11-21 15:49:52.648751993 +0000 UTC m=+880.907273001" Nov 21 15:49:52 crc kubenswrapper[4967]: I1121 15:49:52.671616 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-krp57" podStartSLOduration=1.459915626 podStartE2EDuration="4.671597599s" podCreationTimestamp="2025-11-21 15:49:48 +0000 UTC" firstStartedPulling="2025-11-21 15:49:49.008956022 +0000 UTC m=+877.267477030" lastFinishedPulling="2025-11-21 15:49:52.220637995 +0000 UTC m=+880.479159003" observedRunningTime="2025-11-21 15:49:52.670688713 +0000 UTC m=+880.929209721" watchObservedRunningTime="2025-11-21 15:49:52.671597599 +0000 UTC m=+880.930118617" Nov 21 15:49:52 crc kubenswrapper[4967]: I1121 15:49:52.690691 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-cd9cs" podStartSLOduration=2.094226672 podStartE2EDuration="4.690670646s" podCreationTimestamp="2025-11-21 15:49:48 +0000 UTC" firstStartedPulling="2025-11-21 15:49:49.624225602 +0000 UTC m=+877.882746610" lastFinishedPulling="2025-11-21 15:49:52.220669576 +0000 UTC m=+880.479190584" observedRunningTime="2025-11-21 
15:49:52.686900858 +0000 UTC m=+880.945421866" watchObservedRunningTime="2025-11-21 15:49:52.690670646 +0000 UTC m=+880.949191644" Nov 21 15:49:57 crc kubenswrapper[4967]: I1121 15:49:57.672661 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fgnlx" event={"ID":"8eecf12e-205c-45b3-8be3-84dd5d0c6803","Type":"ContainerStarted","Data":"a78c9a27956ac6884c79cfd8b612e69da0042bd863b226e642375d33259281f6"} Nov 21 15:49:57 crc kubenswrapper[4967]: I1121 15:49:57.692388 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-fgnlx" podStartSLOduration=2.651427535 podStartE2EDuration="9.692371686s" podCreationTimestamp="2025-11-21 15:49:48 +0000 UTC" firstStartedPulling="2025-11-21 15:49:49.47955525 +0000 UTC m=+877.738076258" lastFinishedPulling="2025-11-21 15:49:56.520499401 +0000 UTC m=+884.779020409" observedRunningTime="2025-11-21 15:49:57.688781373 +0000 UTC m=+885.947302381" watchObservedRunningTime="2025-11-21 15:49:57.692371686 +0000 UTC m=+885.950892694" Nov 21 15:49:58 crc kubenswrapper[4967]: I1121 15:49:58.982433 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-krp57" Nov 21 15:49:59 crc kubenswrapper[4967]: I1121 15:49:59.371072 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:59 crc kubenswrapper[4967]: I1121 15:49:59.371375 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:59 crc kubenswrapper[4967]: I1121 15:49:59.376721 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:59 crc kubenswrapper[4967]: I1121 15:49:59.758812 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:49:59 crc kubenswrapper[4967]: I1121 15:49:59.811985 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-84b549b7f9-mhv2t"] Nov 21 15:50:09 crc kubenswrapper[4967]: I1121 15:50:09.512150 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-v5hpv" Nov 21 15:50:24 crc kubenswrapper[4967]: I1121 15:50:24.851692 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-84b549b7f9-mhv2t" podUID="a588e30a-43fb-4d78-a420-db178082c222" containerName="console" containerID="cri-o://0c1b83cc32bbddde1646f421dd810d6bd5ad869aeca4d33be5ebc478374b4787" gracePeriod=15 Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.260907 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-84b549b7f9-mhv2t_a588e30a-43fb-4d78-a420-db178082c222/console/0.log" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.260983 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.382108 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-service-ca\") pod \"a588e30a-43fb-4d78-a420-db178082c222\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.382174 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-console-config\") pod \"a588e30a-43fb-4d78-a420-db178082c222\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.382210 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a588e30a-43fb-4d78-a420-db178082c222-console-serving-cert\") pod \"a588e30a-43fb-4d78-a420-db178082c222\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.382269 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a588e30a-43fb-4d78-a420-db178082c222-console-oauth-config\") pod \"a588e30a-43fb-4d78-a420-db178082c222\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.382293 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-oauth-serving-cert\") pod \"a588e30a-43fb-4d78-a420-db178082c222\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.382355 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7d8xd\" (UniqueName: \"kubernetes.io/projected/a588e30a-43fb-4d78-a420-db178082c222-kube-api-access-7d8xd\") pod \"a588e30a-43fb-4d78-a420-db178082c222\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.382398 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-trusted-ca-bundle\") pod \"a588e30a-43fb-4d78-a420-db178082c222\" (UID: \"a588e30a-43fb-4d78-a420-db178082c222\") " Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.383042 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-service-ca" (OuterVolumeSpecName: "service-ca") pod "a588e30a-43fb-4d78-a420-db178082c222" (UID: "a588e30a-43fb-4d78-a420-db178082c222"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.383108 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-console-config" (OuterVolumeSpecName: "console-config") pod "a588e30a-43fb-4d78-a420-db178082c222" (UID: "a588e30a-43fb-4d78-a420-db178082c222"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.383134 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "a588e30a-43fb-4d78-a420-db178082c222" (UID: "a588e30a-43fb-4d78-a420-db178082c222"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.383482 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "a588e30a-43fb-4d78-a420-db178082c222" (UID: "a588e30a-43fb-4d78-a420-db178082c222"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.387832 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a588e30a-43fb-4d78-a420-db178082c222-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "a588e30a-43fb-4d78-a420-db178082c222" (UID: "a588e30a-43fb-4d78-a420-db178082c222"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.387903 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a588e30a-43fb-4d78-a420-db178082c222-kube-api-access-7d8xd" (OuterVolumeSpecName: "kube-api-access-7d8xd") pod "a588e30a-43fb-4d78-a420-db178082c222" (UID: "a588e30a-43fb-4d78-a420-db178082c222"). InnerVolumeSpecName "kube-api-access-7d8xd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.388109 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a588e30a-43fb-4d78-a420-db178082c222-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "a588e30a-43fb-4d78-a420-db178082c222" (UID: "a588e30a-43fb-4d78-a420-db178082c222"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.484166 4967 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a588e30a-43fb-4d78-a420-db178082c222-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.484427 4967 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.484517 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7d8xd\" (UniqueName: \"kubernetes.io/projected/a588e30a-43fb-4d78-a420-db178082c222-kube-api-access-7d8xd\") on node \"crc\" DevicePath \"\"" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.484580 4967 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.484632 4967 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.484690 4967 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a588e30a-43fb-4d78-a420-db178082c222-console-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.484746 4967 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a588e30a-43fb-4d78-a420-db178082c222-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.946799 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-84b549b7f9-mhv2t_a588e30a-43fb-4d78-a420-db178082c222/console/0.log" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.946863 4967 generic.go:334] "Generic (PLEG): container finished" podID="a588e30a-43fb-4d78-a420-db178082c222" containerID="0c1b83cc32bbddde1646f421dd810d6bd5ad869aeca4d33be5ebc478374b4787" exitCode=2 Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.946901 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-84b549b7f9-mhv2t" event={"ID":"a588e30a-43fb-4d78-a420-db178082c222","Type":"ContainerDied","Data":"0c1b83cc32bbddde1646f421dd810d6bd5ad869aeca4d33be5ebc478374b4787"} Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.946938 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-84b549b7f9-mhv2t" event={"ID":"a588e30a-43fb-4d78-a420-db178082c222","Type":"ContainerDied","Data":"10cfbf5a9c015ee5803123b0a9c47b2b1de39905e00f0fb3d8ad10659dca124d"} Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.947153 4967 scope.go:117] "RemoveContainer" containerID="0c1b83cc32bbddde1646f421dd810d6bd5ad869aeca4d33be5ebc478374b4787" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.947156 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-84b549b7f9-mhv2t" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.982987 4967 scope.go:117] "RemoveContainer" containerID="0c1b83cc32bbddde1646f421dd810d6bd5ad869aeca4d33be5ebc478374b4787" Nov 21 15:50:25 crc kubenswrapper[4967]: E1121 15:50:25.985772 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c1b83cc32bbddde1646f421dd810d6bd5ad869aeca4d33be5ebc478374b4787\": container with ID starting with 0c1b83cc32bbddde1646f421dd810d6bd5ad869aeca4d33be5ebc478374b4787 not found: ID does not exist" containerID="0c1b83cc32bbddde1646f421dd810d6bd5ad869aeca4d33be5ebc478374b4787" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.985857 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c1b83cc32bbddde1646f421dd810d6bd5ad869aeca4d33be5ebc478374b4787"} err="failed to get container status \"0c1b83cc32bbddde1646f421dd810d6bd5ad869aeca4d33be5ebc478374b4787\": rpc error: code = NotFound desc = could not find container \"0c1b83cc32bbddde1646f421dd810d6bd5ad869aeca4d33be5ebc478374b4787\": container with ID starting with 0c1b83cc32bbddde1646f421dd810d6bd5ad869aeca4d33be5ebc478374b4787 not found: ID does not exist" Nov 21 15:50:25 crc kubenswrapper[4967]: I1121 15:50:25.992070 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-84b549b7f9-mhv2t"] Nov 21 15:50:26 crc kubenswrapper[4967]: I1121 15:50:26.004452 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-84b549b7f9-mhv2t"] Nov 21 15:50:26 crc kubenswrapper[4967]: I1121 15:50:26.547432 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a588e30a-43fb-4d78-a420-db178082c222" path="/var/lib/kubelet/pods/a588e30a-43fb-4d78-a420-db178082c222/volumes" Nov 21 15:50:26 crc kubenswrapper[4967]: I1121 15:50:26.744633 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7"] Nov 21 15:50:26 crc kubenswrapper[4967]: E1121 15:50:26.745004 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a588e30a-43fb-4d78-a420-db178082c222" containerName="console" Nov 21 15:50:26 crc kubenswrapper[4967]: I1121 15:50:26.745026 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a588e30a-43fb-4d78-a420-db178082c222" containerName="console" Nov 21 15:50:26 crc kubenswrapper[4967]: I1121 15:50:26.745207 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="a588e30a-43fb-4d78-a420-db178082c222" containerName="console" Nov 21 15:50:26 crc kubenswrapper[4967]: I1121 15:50:26.746430 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7" Nov 21 15:50:26 crc kubenswrapper[4967]: I1121 15:50:26.748722 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 21 15:50:26 crc kubenswrapper[4967]: I1121 15:50:26.763072 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7"] Nov 21 15:50:26 crc kubenswrapper[4967]: I1121 15:50:26.808744 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b71c70ea-eebb-40fb-baef-5e993f014e89-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7\" (UID: \"b71c70ea-eebb-40fb-baef-5e993f014e89\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7" Nov 21 15:50:26 crc kubenswrapper[4967]: I1121 15:50:26.809247 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b71c70ea-eebb-40fb-baef-5e993f014e89-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7\" (UID: \"b71c70ea-eebb-40fb-baef-5e993f014e89\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7" Nov 21 15:50:26 crc kubenswrapper[4967]: I1121 15:50:26.809354 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztmb2\" (UniqueName: \"kubernetes.io/projected/b71c70ea-eebb-40fb-baef-5e993f014e89-kube-api-access-ztmb2\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7\" (UID: \"b71c70ea-eebb-40fb-baef-5e993f014e89\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7" Nov 21 15:50:26 crc kubenswrapper[4967]: I1121 15:50:26.910512 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b71c70ea-eebb-40fb-baef-5e993f014e89-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7\" (UID: \"b71c70ea-eebb-40fb-baef-5e993f014e89\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7" Nov 21 15:50:26 crc kubenswrapper[4967]: I1121 15:50:26.910570 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b71c70ea-eebb-40fb-baef-5e993f014e89-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7\" (UID: \"b71c70ea-eebb-40fb-baef-5e993f014e89\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7" Nov 21 15:50:26 crc kubenswrapper[4967]: I1121 15:50:26.910596 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztmb2\" (UniqueName: \"kubernetes.io/projected/b71c70ea-eebb-40fb-baef-5e993f014e89-kube-api-access-ztmb2\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7\" (UID: \"b71c70ea-eebb-40fb-baef-5e993f014e89\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7" Nov 21 15:50:26 crc kubenswrapper[4967]: I1121 15:50:26.911142 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/b71c70ea-eebb-40fb-baef-5e993f014e89-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7\" (UID: \"b71c70ea-eebb-40fb-baef-5e993f014e89\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7" Nov 21 15:50:26 crc kubenswrapper[4967]: I1121 15:50:26.911408 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b71c70ea-eebb-40fb-baef-5e993f014e89-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7\" (UID: \"b71c70ea-eebb-40fb-baef-5e993f014e89\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7" Nov 21 15:50:26 crc kubenswrapper[4967]: I1121 15:50:26.928405 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztmb2\" (UniqueName: \"kubernetes.io/projected/b71c70ea-eebb-40fb-baef-5e993f014e89-kube-api-access-ztmb2\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7\" (UID: \"b71c70ea-eebb-40fb-baef-5e993f014e89\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7" Nov 21 15:50:27 crc kubenswrapper[4967]: I1121 15:50:27.062748 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7" Nov 21 15:50:27 crc kubenswrapper[4967]: I1121 15:50:27.470523 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7"] Nov 21 15:50:27 crc kubenswrapper[4967]: I1121 15:50:27.965087 4967 generic.go:334] "Generic (PLEG): container finished" podID="b71c70ea-eebb-40fb-baef-5e993f014e89" containerID="43f6a0d3f957eecb17528c30d0d93892322cffd3065a25b713e66ae0bbe4cda6" exitCode=0 Nov 21 15:50:27 crc kubenswrapper[4967]: I1121 15:50:27.965148 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7" event={"ID":"b71c70ea-eebb-40fb-baef-5e993f014e89","Type":"ContainerDied","Data":"43f6a0d3f957eecb17528c30d0d93892322cffd3065a25b713e66ae0bbe4cda6"} Nov 21 15:50:27 crc kubenswrapper[4967]: I1121 15:50:27.965463 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7" event={"ID":"b71c70ea-eebb-40fb-baef-5e993f014e89","Type":"ContainerStarted","Data":"6f6ee3b375a67b96819fab601b5571f9fa63d89019a8ae096706f4c3fe8758cc"} Nov 21 15:50:27 crc kubenswrapper[4967]: I1121 15:50:27.966945 4967 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 15:50:29 crc kubenswrapper[4967]: I1121 15:50:29.981952 4967 generic.go:334] "Generic (PLEG): container finished" podID="b71c70ea-eebb-40fb-baef-5e993f014e89" containerID="c74fff8bc87fb0ad13981b9b5f80b9a73450cb6e5a385ccbfee969293ddf5074" exitCode=0 Nov 21 15:50:29 crc kubenswrapper[4967]: I1121 15:50:29.982043 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7" event={"ID":"b71c70ea-eebb-40fb-baef-5e993f014e89","Type":"ContainerDied","Data":"c74fff8bc87fb0ad13981b9b5f80b9a73450cb6e5a385ccbfee969293ddf5074"} Nov 21 15:50:30 crc kubenswrapper[4967]: I1121 15:50:30.992230 4967 generic.go:334] "Generic (PLEG): container finished" 
podID="b71c70ea-eebb-40fb-baef-5e993f014e89" containerID="8affb355b2115a17c5c0728791b16e237915380d2b7d931e8b84ca083503b235" exitCode=0 Nov 21 15:50:30 crc kubenswrapper[4967]: I1121 15:50:30.992275 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7" event={"ID":"b71c70ea-eebb-40fb-baef-5e993f014e89","Type":"ContainerDied","Data":"8affb355b2115a17c5c0728791b16e237915380d2b7d931e8b84ca083503b235"} Nov 21 15:50:32 crc kubenswrapper[4967]: I1121 15:50:32.258105 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7" Nov 21 15:50:32 crc kubenswrapper[4967]: I1121 15:50:32.398884 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztmb2\" (UniqueName: \"kubernetes.io/projected/b71c70ea-eebb-40fb-baef-5e993f014e89-kube-api-access-ztmb2\") pod \"b71c70ea-eebb-40fb-baef-5e993f014e89\" (UID: \"b71c70ea-eebb-40fb-baef-5e993f014e89\") " Nov 21 15:50:32 crc kubenswrapper[4967]: I1121 15:50:32.399039 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b71c70ea-eebb-40fb-baef-5e993f014e89-util\") pod \"b71c70ea-eebb-40fb-baef-5e993f014e89\" (UID: \"b71c70ea-eebb-40fb-baef-5e993f014e89\") " Nov 21 15:50:32 crc kubenswrapper[4967]: I1121 15:50:32.399085 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b71c70ea-eebb-40fb-baef-5e993f014e89-bundle\") pod \"b71c70ea-eebb-40fb-baef-5e993f014e89\" (UID: \"b71c70ea-eebb-40fb-baef-5e993f014e89\") " Nov 21 15:50:32 crc kubenswrapper[4967]: I1121 15:50:32.400069 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b71c70ea-eebb-40fb-baef-5e993f014e89-bundle" (OuterVolumeSpecName: "bundle") pod "b71c70ea-eebb-40fb-baef-5e993f014e89" (UID: "b71c70ea-eebb-40fb-baef-5e993f014e89"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:50:32 crc kubenswrapper[4967]: I1121 15:50:32.407515 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b71c70ea-eebb-40fb-baef-5e993f014e89-kube-api-access-ztmb2" (OuterVolumeSpecName: "kube-api-access-ztmb2") pod "b71c70ea-eebb-40fb-baef-5e993f014e89" (UID: "b71c70ea-eebb-40fb-baef-5e993f014e89"). InnerVolumeSpecName "kube-api-access-ztmb2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:50:32 crc kubenswrapper[4967]: I1121 15:50:32.430125 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b71c70ea-eebb-40fb-baef-5e993f014e89-util" (OuterVolumeSpecName: "util") pod "b71c70ea-eebb-40fb-baef-5e993f014e89" (UID: "b71c70ea-eebb-40fb-baef-5e993f014e89"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:50:32 crc kubenswrapper[4967]: I1121 15:50:32.500517 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztmb2\" (UniqueName: \"kubernetes.io/projected/b71c70ea-eebb-40fb-baef-5e993f014e89-kube-api-access-ztmb2\") on node \"crc\" DevicePath \"\"" Nov 21 15:50:32 crc kubenswrapper[4967]: I1121 15:50:32.500550 4967 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b71c70ea-eebb-40fb-baef-5e993f014e89-util\") on node \"crc\" DevicePath \"\"" Nov 21 15:50:32 crc kubenswrapper[4967]: I1121 15:50:32.500563 4967 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b71c70ea-eebb-40fb-baef-5e993f014e89-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:50:33 crc kubenswrapper[4967]: I1121 15:50:33.005911 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7" event={"ID":"b71c70ea-eebb-40fb-baef-5e993f014e89","Type":"ContainerDied","Data":"6f6ee3b375a67b96819fab601b5571f9fa63d89019a8ae096706f4c3fe8758cc"} Nov 21 15:50:33 crc kubenswrapper[4967]: I1121 15:50:33.005954 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f6ee3b375a67b96819fab601b5571f9fa63d89019a8ae096706f4c3fe8758cc" Nov 21 15:50:33 crc kubenswrapper[4967]: I1121 15:50:33.005987 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.278848 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-77bbdf4fb9-hj2sz"] Nov 21 15:50:41 crc kubenswrapper[4967]: E1121 15:50:41.279730 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b71c70ea-eebb-40fb-baef-5e993f014e89" containerName="pull" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.279747 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b71c70ea-eebb-40fb-baef-5e993f014e89" containerName="pull" Nov 21 15:50:41 crc kubenswrapper[4967]: E1121 15:50:41.279786 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b71c70ea-eebb-40fb-baef-5e993f014e89" containerName="extract" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.279795 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b71c70ea-eebb-40fb-baef-5e993f014e89" containerName="extract" Nov 21 15:50:41 crc kubenswrapper[4967]: E1121 15:50:41.279807 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b71c70ea-eebb-40fb-baef-5e993f014e89" containerName="util" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.279815 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b71c70ea-eebb-40fb-baef-5e993f014e89" containerName="util" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.279974 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="b71c70ea-eebb-40fb-baef-5e993f014e89" containerName="extract" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.280646 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-77bbdf4fb9-hj2sz" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.283110 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.283153 4967 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.283299 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.284905 4967 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-4j48j" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.286504 4967 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.324877 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-77bbdf4fb9-hj2sz"] Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.359873 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/33fe3ce1-2592-438b-a9a0-8c55a47013d2-apiservice-cert\") pod \"metallb-operator-controller-manager-77bbdf4fb9-hj2sz\" (UID: \"33fe3ce1-2592-438b-a9a0-8c55a47013d2\") " pod="metallb-system/metallb-operator-controller-manager-77bbdf4fb9-hj2sz" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.359936 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cbmv\" (UniqueName: \"kubernetes.io/projected/33fe3ce1-2592-438b-a9a0-8c55a47013d2-kube-api-access-6cbmv\") pod \"metallb-operator-controller-manager-77bbdf4fb9-hj2sz\" (UID: \"33fe3ce1-2592-438b-a9a0-8c55a47013d2\") " pod="metallb-system/metallb-operator-controller-manager-77bbdf4fb9-hj2sz" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.359985 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/33fe3ce1-2592-438b-a9a0-8c55a47013d2-webhook-cert\") pod \"metallb-operator-controller-manager-77bbdf4fb9-hj2sz\" (UID: \"33fe3ce1-2592-438b-a9a0-8c55a47013d2\") " pod="metallb-system/metallb-operator-controller-manager-77bbdf4fb9-hj2sz" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.461847 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/33fe3ce1-2592-438b-a9a0-8c55a47013d2-apiservice-cert\") pod \"metallb-operator-controller-manager-77bbdf4fb9-hj2sz\" (UID: \"33fe3ce1-2592-438b-a9a0-8c55a47013d2\") " pod="metallb-system/metallb-operator-controller-manager-77bbdf4fb9-hj2sz" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.461895 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cbmv\" (UniqueName: \"kubernetes.io/projected/33fe3ce1-2592-438b-a9a0-8c55a47013d2-kube-api-access-6cbmv\") pod \"metallb-operator-controller-manager-77bbdf4fb9-hj2sz\" (UID: \"33fe3ce1-2592-438b-a9a0-8c55a47013d2\") " pod="metallb-system/metallb-operator-controller-manager-77bbdf4fb9-hj2sz" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.461925 
4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/33fe3ce1-2592-438b-a9a0-8c55a47013d2-webhook-cert\") pod \"metallb-operator-controller-manager-77bbdf4fb9-hj2sz\" (UID: \"33fe3ce1-2592-438b-a9a0-8c55a47013d2\") " pod="metallb-system/metallb-operator-controller-manager-77bbdf4fb9-hj2sz" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.468672 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/33fe3ce1-2592-438b-a9a0-8c55a47013d2-webhook-cert\") pod \"metallb-operator-controller-manager-77bbdf4fb9-hj2sz\" (UID: \"33fe3ce1-2592-438b-a9a0-8c55a47013d2\") " pod="metallb-system/metallb-operator-controller-manager-77bbdf4fb9-hj2sz" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.470922 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/33fe3ce1-2592-438b-a9a0-8c55a47013d2-apiservice-cert\") pod \"metallb-operator-controller-manager-77bbdf4fb9-hj2sz\" (UID: \"33fe3ce1-2592-438b-a9a0-8c55a47013d2\") " pod="metallb-system/metallb-operator-controller-manager-77bbdf4fb9-hj2sz" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.482065 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6cbmv\" (UniqueName: \"kubernetes.io/projected/33fe3ce1-2592-438b-a9a0-8c55a47013d2-kube-api-access-6cbmv\") pod \"metallb-operator-controller-manager-77bbdf4fb9-hj2sz\" (UID: \"33fe3ce1-2592-438b-a9a0-8c55a47013d2\") " pod="metallb-system/metallb-operator-controller-manager-77bbdf4fb9-hj2sz" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.602225 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-77bbdf4fb9-hj2sz" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.649932 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-7d67c6f6df-vjpg7"] Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.650968 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7d67c6f6df-vjpg7" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.652928 4967 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-k64b6" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.653067 4967 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.653080 4967 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.664951 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c3800b73-ab16-46a6-b24d-e96158d1deec-webhook-cert\") pod \"metallb-operator-webhook-server-7d67c6f6df-vjpg7\" (UID: \"c3800b73-ab16-46a6-b24d-e96158d1deec\") " pod="metallb-system/metallb-operator-webhook-server-7d67c6f6df-vjpg7" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.665002 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9tbd7\" (UniqueName: \"kubernetes.io/projected/c3800b73-ab16-46a6-b24d-e96158d1deec-kube-api-access-9tbd7\") pod \"metallb-operator-webhook-server-7d67c6f6df-vjpg7\" (UID: \"c3800b73-ab16-46a6-b24d-e96158d1deec\") " pod="metallb-system/metallb-operator-webhook-server-7d67c6f6df-vjpg7" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.665035 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c3800b73-ab16-46a6-b24d-e96158d1deec-apiservice-cert\") pod \"metallb-operator-webhook-server-7d67c6f6df-vjpg7\" (UID: \"c3800b73-ab16-46a6-b24d-e96158d1deec\") " pod="metallb-system/metallb-operator-webhook-server-7d67c6f6df-vjpg7" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.679439 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7d67c6f6df-vjpg7"] Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.767133 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c3800b73-ab16-46a6-b24d-e96158d1deec-webhook-cert\") pod \"metallb-operator-webhook-server-7d67c6f6df-vjpg7\" (UID: \"c3800b73-ab16-46a6-b24d-e96158d1deec\") " pod="metallb-system/metallb-operator-webhook-server-7d67c6f6df-vjpg7" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.767194 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9tbd7\" (UniqueName: \"kubernetes.io/projected/c3800b73-ab16-46a6-b24d-e96158d1deec-kube-api-access-9tbd7\") pod \"metallb-operator-webhook-server-7d67c6f6df-vjpg7\" (UID: \"c3800b73-ab16-46a6-b24d-e96158d1deec\") " pod="metallb-system/metallb-operator-webhook-server-7d67c6f6df-vjpg7" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.767235 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c3800b73-ab16-46a6-b24d-e96158d1deec-apiservice-cert\") pod \"metallb-operator-webhook-server-7d67c6f6df-vjpg7\" (UID: \"c3800b73-ab16-46a6-b24d-e96158d1deec\") " pod="metallb-system/metallb-operator-webhook-server-7d67c6f6df-vjpg7" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 
15:50:41.791476 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c3800b73-ab16-46a6-b24d-e96158d1deec-webhook-cert\") pod \"metallb-operator-webhook-server-7d67c6f6df-vjpg7\" (UID: \"c3800b73-ab16-46a6-b24d-e96158d1deec\") " pod="metallb-system/metallb-operator-webhook-server-7d67c6f6df-vjpg7" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.794943 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9tbd7\" (UniqueName: \"kubernetes.io/projected/c3800b73-ab16-46a6-b24d-e96158d1deec-kube-api-access-9tbd7\") pod \"metallb-operator-webhook-server-7d67c6f6df-vjpg7\" (UID: \"c3800b73-ab16-46a6-b24d-e96158d1deec\") " pod="metallb-system/metallb-operator-webhook-server-7d67c6f6df-vjpg7" Nov 21 15:50:41 crc kubenswrapper[4967]: I1121 15:50:41.795352 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c3800b73-ab16-46a6-b24d-e96158d1deec-apiservice-cert\") pod \"metallb-operator-webhook-server-7d67c6f6df-vjpg7\" (UID: \"c3800b73-ab16-46a6-b24d-e96158d1deec\") " pod="metallb-system/metallb-operator-webhook-server-7d67c6f6df-vjpg7" Nov 21 15:50:42 crc kubenswrapper[4967]: I1121 15:50:42.021867 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-7d67c6f6df-vjpg7" Nov 21 15:50:42 crc kubenswrapper[4967]: I1121 15:50:42.190654 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-77bbdf4fb9-hj2sz"] Nov 21 15:50:42 crc kubenswrapper[4967]: I1121 15:50:42.281961 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-7d67c6f6df-vjpg7"] Nov 21 15:50:43 crc kubenswrapper[4967]: I1121 15:50:43.084375 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-77bbdf4fb9-hj2sz" event={"ID":"33fe3ce1-2592-438b-a9a0-8c55a47013d2","Type":"ContainerStarted","Data":"96884e7d58801305c721009e23561ebd8985530df9a6c500352d3703b5b10f0d"} Nov 21 15:50:43 crc kubenswrapper[4967]: I1121 15:50:43.087256 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7d67c6f6df-vjpg7" event={"ID":"c3800b73-ab16-46a6-b24d-e96158d1deec","Type":"ContainerStarted","Data":"7c73d58f7d444c363e9ffb319b40b04d2b002660e16208bec25f94e531718cff"} Nov 21 15:50:46 crc kubenswrapper[4967]: I1121 15:50:46.522043 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:50:46 crc kubenswrapper[4967]: I1121 15:50:46.522745 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:50:48 crc kubenswrapper[4967]: I1121 15:50:48.129255 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-77bbdf4fb9-hj2sz" 
event={"ID":"33fe3ce1-2592-438b-a9a0-8c55a47013d2","Type":"ContainerStarted","Data":"60cce144496a52cf90d0f8dbf8c2f73aca7eb054d28f04ce2fe28c07d5b0fb97"} Nov 21 15:50:48 crc kubenswrapper[4967]: I1121 15:50:48.129807 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-77bbdf4fb9-hj2sz" Nov 21 15:50:48 crc kubenswrapper[4967]: I1121 15:50:48.131573 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-7d67c6f6df-vjpg7" event={"ID":"c3800b73-ab16-46a6-b24d-e96158d1deec","Type":"ContainerStarted","Data":"99a3077a54ab7794a2eb61226eaf13d46662dc9945e122c62d63d67e661d4abd"} Nov 21 15:50:48 crc kubenswrapper[4967]: I1121 15:50:48.132607 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-7d67c6f6df-vjpg7" Nov 21 15:50:48 crc kubenswrapper[4967]: I1121 15:50:48.205293 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-7d67c6f6df-vjpg7" podStartSLOduration=2.020687028 podStartE2EDuration="7.205271958s" podCreationTimestamp="2025-11-21 15:50:41 +0000 UTC" firstStartedPulling="2025-11-21 15:50:42.288169253 +0000 UTC m=+930.546690261" lastFinishedPulling="2025-11-21 15:50:47.472754193 +0000 UTC m=+935.731275191" observedRunningTime="2025-11-21 15:50:48.202592191 +0000 UTC m=+936.461113199" watchObservedRunningTime="2025-11-21 15:50:48.205271958 +0000 UTC m=+936.463792966" Nov 21 15:50:48 crc kubenswrapper[4967]: I1121 15:50:48.209847 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-77bbdf4fb9-hj2sz" podStartSLOduration=1.9516370969999999 podStartE2EDuration="7.209818289s" podCreationTimestamp="2025-11-21 15:50:41 +0000 UTC" firstStartedPulling="2025-11-21 15:50:42.210490104 +0000 UTC m=+930.469011112" lastFinishedPulling="2025-11-21 15:50:47.468671296 +0000 UTC m=+935.727192304" observedRunningTime="2025-11-21 15:50:48.174802463 +0000 UTC m=+936.433323501" watchObservedRunningTime="2025-11-21 15:50:48.209818289 +0000 UTC m=+936.468339297" Nov 21 15:51:02 crc kubenswrapper[4967]: I1121 15:51:02.033990 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-7d67c6f6df-vjpg7" Nov 21 15:51:16 crc kubenswrapper[4967]: I1121 15:51:16.522502 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:51:16 crc kubenswrapper[4967]: I1121 15:51:16.523079 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:51:21 crc kubenswrapper[4967]: I1121 15:51:21.605141 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-77bbdf4fb9-hj2sz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.475332 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-knjxz"] Nov 21 15:51:22 crc 
kubenswrapper[4967]: I1121 15:51:22.478859 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.481148 4967 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.481353 4967 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-5crv9" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.483480 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.497050 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-wzh6s"] Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.498216 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-wzh6s" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.502232 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-wzh6s"] Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.502432 4967 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.559779 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/f9285829-036c-4010-b85e-6fcec9f6ce0e-reloader\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.559828 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fp7kn\" (UniqueName: \"kubernetes.io/projected/f9285829-036c-4010-b85e-6fcec9f6ce0e-kube-api-access-fp7kn\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.559897 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/f9285829-036c-4010-b85e-6fcec9f6ce0e-metrics\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.559921 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/f9285829-036c-4010-b85e-6fcec9f6ce0e-frr-sockets\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.559976 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/f9285829-036c-4010-b85e-6fcec9f6ce0e-frr-startup\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.560000 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zbl4\" (UniqueName: 
\"kubernetes.io/projected/1fa4e173-1be2-4f7d-82e8-d607e1481bcd-kube-api-access-6zbl4\") pod \"frr-k8s-webhook-server-6998585d5-wzh6s\" (UID: \"1fa4e173-1be2-4f7d-82e8-d607e1481bcd\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-wzh6s" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.560022 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1fa4e173-1be2-4f7d-82e8-d607e1481bcd-cert\") pod \"frr-k8s-webhook-server-6998585d5-wzh6s\" (UID: \"1fa4e173-1be2-4f7d-82e8-d607e1481bcd\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-wzh6s" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.560275 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/f9285829-036c-4010-b85e-6fcec9f6ce0e-frr-conf\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.560420 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f9285829-036c-4010-b85e-6fcec9f6ce0e-metrics-certs\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.598818 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-cz2xh"] Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.600674 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-cz2xh" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.607708 4967 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.607791 4967 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.607727 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.607844 4967 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-2974b" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.623895 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-x7rr6"] Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.626052 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-x7rr6" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.630886 4967 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.636287 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-x7rr6"] Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.663513 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scqjl\" (UniqueName: \"kubernetes.io/projected/0007ae58-10dd-45bc-85cb-2a74a5cca4e5-kube-api-access-scqjl\") pod \"controller-6c7b4b5f48-x7rr6\" (UID: \"0007ae58-10dd-45bc-85cb-2a74a5cca4e5\") " pod="metallb-system/controller-6c7b4b5f48-x7rr6" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.663569 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/f9285829-036c-4010-b85e-6fcec9f6ce0e-metrics\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.663592 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/52ab67e9-1c78-497d-aa00-18a29052c0bd-memberlist\") pod \"speaker-cz2xh\" (UID: \"52ab67e9-1c78-497d-aa00-18a29052c0bd\") " pod="metallb-system/speaker-cz2xh" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.663616 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/f9285829-036c-4010-b85e-6fcec9f6ce0e-frr-sockets\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.663645 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0007ae58-10dd-45bc-85cb-2a74a5cca4e5-cert\") pod \"controller-6c7b4b5f48-x7rr6\" (UID: \"0007ae58-10dd-45bc-85cb-2a74a5cca4e5\") " pod="metallb-system/controller-6c7b4b5f48-x7rr6" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.663668 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/52ab67e9-1c78-497d-aa00-18a29052c0bd-metallb-excludel2\") pod \"speaker-cz2xh\" (UID: \"52ab67e9-1c78-497d-aa00-18a29052c0bd\") " pod="metallb-system/speaker-cz2xh" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.663703 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/52ab67e9-1c78-497d-aa00-18a29052c0bd-metrics-certs\") pod \"speaker-cz2xh\" (UID: \"52ab67e9-1c78-497d-aa00-18a29052c0bd\") " pod="metallb-system/speaker-cz2xh" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.663724 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0007ae58-10dd-45bc-85cb-2a74a5cca4e5-metrics-certs\") pod \"controller-6c7b4b5f48-x7rr6\" (UID: \"0007ae58-10dd-45bc-85cb-2a74a5cca4e5\") " pod="metallb-system/controller-6c7b4b5f48-x7rr6" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.663782 4967 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/f9285829-036c-4010-b85e-6fcec9f6ce0e-frr-startup\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.664062 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zbl4\" (UniqueName: \"kubernetes.io/projected/1fa4e173-1be2-4f7d-82e8-d607e1481bcd-kube-api-access-6zbl4\") pod \"frr-k8s-webhook-server-6998585d5-wzh6s\" (UID: \"1fa4e173-1be2-4f7d-82e8-d607e1481bcd\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-wzh6s" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.664086 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1fa4e173-1be2-4f7d-82e8-d607e1481bcd-cert\") pod \"frr-k8s-webhook-server-6998585d5-wzh6s\" (UID: \"1fa4e173-1be2-4f7d-82e8-d607e1481bcd\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-wzh6s" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.664110 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/f9285829-036c-4010-b85e-6fcec9f6ce0e-frr-conf\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.664236 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f9285829-036c-4010-b85e-6fcec9f6ce0e-metrics-certs\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.664269 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/f9285829-036c-4010-b85e-6fcec9f6ce0e-reloader\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.664289 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcsmn\" (UniqueName: \"kubernetes.io/projected/52ab67e9-1c78-497d-aa00-18a29052c0bd-kube-api-access-dcsmn\") pod \"speaker-cz2xh\" (UID: \"52ab67e9-1c78-497d-aa00-18a29052c0bd\") " pod="metallb-system/speaker-cz2xh" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.664325 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fp7kn\" (UniqueName: \"kubernetes.io/projected/f9285829-036c-4010-b85e-6fcec9f6ce0e-kube-api-access-fp7kn\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.664963 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/f9285829-036c-4010-b85e-6fcec9f6ce0e-metrics\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.665159 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/f9285829-036c-4010-b85e-6fcec9f6ce0e-frr-sockets\") pod \"frr-k8s-knjxz\" 
(UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.665885 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/f9285829-036c-4010-b85e-6fcec9f6ce0e-frr-conf\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: E1121 15:51:22.665912 4967 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Nov 21 15:51:22 crc kubenswrapper[4967]: E1121 15:51:22.665998 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1fa4e173-1be2-4f7d-82e8-d607e1481bcd-cert podName:1fa4e173-1be2-4f7d-82e8-d607e1481bcd nodeName:}" failed. No retries permitted until 2025-11-21 15:51:23.165971743 +0000 UTC m=+971.424492841 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/1fa4e173-1be2-4f7d-82e8-d607e1481bcd-cert") pod "frr-k8s-webhook-server-6998585d5-wzh6s" (UID: "1fa4e173-1be2-4f7d-82e8-d607e1481bcd") : secret "frr-k8s-webhook-server-cert" not found Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.666558 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/f9285829-036c-4010-b85e-6fcec9f6ce0e-frr-startup\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.667164 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/f9285829-036c-4010-b85e-6fcec9f6ce0e-reloader\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.672412 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f9285829-036c-4010-b85e-6fcec9f6ce0e-metrics-certs\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.683221 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zbl4\" (UniqueName: \"kubernetes.io/projected/1fa4e173-1be2-4f7d-82e8-d607e1481bcd-kube-api-access-6zbl4\") pod \"frr-k8s-webhook-server-6998585d5-wzh6s\" (UID: \"1fa4e173-1be2-4f7d-82e8-d607e1481bcd\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-wzh6s" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.691236 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fp7kn\" (UniqueName: \"kubernetes.io/projected/f9285829-036c-4010-b85e-6fcec9f6ce0e-kube-api-access-fp7kn\") pod \"frr-k8s-knjxz\" (UID: \"f9285829-036c-4010-b85e-6fcec9f6ce0e\") " pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.766782 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcsmn\" (UniqueName: \"kubernetes.io/projected/52ab67e9-1c78-497d-aa00-18a29052c0bd-kube-api-access-dcsmn\") pod \"speaker-cz2xh\" (UID: \"52ab67e9-1c78-497d-aa00-18a29052c0bd\") " pod="metallb-system/speaker-cz2xh" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.766903 4967 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scqjl\" (UniqueName: \"kubernetes.io/projected/0007ae58-10dd-45bc-85cb-2a74a5cca4e5-kube-api-access-scqjl\") pod \"controller-6c7b4b5f48-x7rr6\" (UID: \"0007ae58-10dd-45bc-85cb-2a74a5cca4e5\") " pod="metallb-system/controller-6c7b4b5f48-x7rr6" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.766952 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/52ab67e9-1c78-497d-aa00-18a29052c0bd-memberlist\") pod \"speaker-cz2xh\" (UID: \"52ab67e9-1c78-497d-aa00-18a29052c0bd\") " pod="metallb-system/speaker-cz2xh" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.766998 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0007ae58-10dd-45bc-85cb-2a74a5cca4e5-cert\") pod \"controller-6c7b4b5f48-x7rr6\" (UID: \"0007ae58-10dd-45bc-85cb-2a74a5cca4e5\") " pod="metallb-system/controller-6c7b4b5f48-x7rr6" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.767032 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/52ab67e9-1c78-497d-aa00-18a29052c0bd-metallb-excludel2\") pod \"speaker-cz2xh\" (UID: \"52ab67e9-1c78-497d-aa00-18a29052c0bd\") " pod="metallb-system/speaker-cz2xh" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.767071 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/52ab67e9-1c78-497d-aa00-18a29052c0bd-metrics-certs\") pod \"speaker-cz2xh\" (UID: \"52ab67e9-1c78-497d-aa00-18a29052c0bd\") " pod="metallb-system/speaker-cz2xh" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.767096 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0007ae58-10dd-45bc-85cb-2a74a5cca4e5-metrics-certs\") pod \"controller-6c7b4b5f48-x7rr6\" (UID: \"0007ae58-10dd-45bc-85cb-2a74a5cca4e5\") " pod="metallb-system/controller-6c7b4b5f48-x7rr6" Nov 21 15:51:22 crc kubenswrapper[4967]: E1121 15:51:22.767274 4967 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Nov 21 15:51:22 crc kubenswrapper[4967]: E1121 15:51:22.767382 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0007ae58-10dd-45bc-85cb-2a74a5cca4e5-metrics-certs podName:0007ae58-10dd-45bc-85cb-2a74a5cca4e5 nodeName:}" failed. No retries permitted until 2025-11-21 15:51:23.267354153 +0000 UTC m=+971.525875161 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0007ae58-10dd-45bc-85cb-2a74a5cca4e5-metrics-certs") pod "controller-6c7b4b5f48-x7rr6" (UID: "0007ae58-10dd-45bc-85cb-2a74a5cca4e5") : secret "controller-certs-secret" not found Nov 21 15:51:22 crc kubenswrapper[4967]: E1121 15:51:22.768199 4967 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 21 15:51:22 crc kubenswrapper[4967]: E1121 15:51:22.768242 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/52ab67e9-1c78-497d-aa00-18a29052c0bd-memberlist podName:52ab67e9-1c78-497d-aa00-18a29052c0bd nodeName:}" failed. 
No retries permitted until 2025-11-21 15:51:23.268230978 +0000 UTC m=+971.526751986 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/52ab67e9-1c78-497d-aa00-18a29052c0bd-memberlist") pod "speaker-cz2xh" (UID: "52ab67e9-1c78-497d-aa00-18a29052c0bd") : secret "metallb-memberlist" not found Nov 21 15:51:22 crc kubenswrapper[4967]: E1121 15:51:22.768512 4967 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Nov 21 15:51:22 crc kubenswrapper[4967]: E1121 15:51:22.768628 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/52ab67e9-1c78-497d-aa00-18a29052c0bd-metrics-certs podName:52ab67e9-1c78-497d-aa00-18a29052c0bd nodeName:}" failed. No retries permitted until 2025-11-21 15:51:23.268601239 +0000 UTC m=+971.527122247 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/52ab67e9-1c78-497d-aa00-18a29052c0bd-metrics-certs") pod "speaker-cz2xh" (UID: "52ab67e9-1c78-497d-aa00-18a29052c0bd") : secret "speaker-certs-secret" not found Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.770917 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/52ab67e9-1c78-497d-aa00-18a29052c0bd-metallb-excludel2\") pod \"speaker-cz2xh\" (UID: \"52ab67e9-1c78-497d-aa00-18a29052c0bd\") " pod="metallb-system/speaker-cz2xh" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.773662 4967 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.783914 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0007ae58-10dd-45bc-85cb-2a74a5cca4e5-cert\") pod \"controller-6c7b4b5f48-x7rr6\" (UID: \"0007ae58-10dd-45bc-85cb-2a74a5cca4e5\") " pod="metallb-system/controller-6c7b4b5f48-x7rr6" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.803003 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scqjl\" (UniqueName: \"kubernetes.io/projected/0007ae58-10dd-45bc-85cb-2a74a5cca4e5-kube-api-access-scqjl\") pod \"controller-6c7b4b5f48-x7rr6\" (UID: \"0007ae58-10dd-45bc-85cb-2a74a5cca4e5\") " pod="metallb-system/controller-6c7b4b5f48-x7rr6" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.803953 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcsmn\" (UniqueName: \"kubernetes.io/projected/52ab67e9-1c78-497d-aa00-18a29052c0bd-kube-api-access-dcsmn\") pod \"speaker-cz2xh\" (UID: \"52ab67e9-1c78-497d-aa00-18a29052c0bd\") " pod="metallb-system/speaker-cz2xh" Nov 21 15:51:22 crc kubenswrapper[4967]: I1121 15:51:22.814119 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:23 crc kubenswrapper[4967]: I1121 15:51:23.173203 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1fa4e173-1be2-4f7d-82e8-d607e1481bcd-cert\") pod \"frr-k8s-webhook-server-6998585d5-wzh6s\" (UID: \"1fa4e173-1be2-4f7d-82e8-d607e1481bcd\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-wzh6s" Nov 21 15:51:23 crc kubenswrapper[4967]: I1121 15:51:23.177419 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1fa4e173-1be2-4f7d-82e8-d607e1481bcd-cert\") pod \"frr-k8s-webhook-server-6998585d5-wzh6s\" (UID: \"1fa4e173-1be2-4f7d-82e8-d607e1481bcd\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-wzh6s" Nov 21 15:51:23 crc kubenswrapper[4967]: I1121 15:51:23.275005 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/52ab67e9-1c78-497d-aa00-18a29052c0bd-memberlist\") pod \"speaker-cz2xh\" (UID: \"52ab67e9-1c78-497d-aa00-18a29052c0bd\") " pod="metallb-system/speaker-cz2xh" Nov 21 15:51:23 crc kubenswrapper[4967]: I1121 15:51:23.275087 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/52ab67e9-1c78-497d-aa00-18a29052c0bd-metrics-certs\") pod \"speaker-cz2xh\" (UID: \"52ab67e9-1c78-497d-aa00-18a29052c0bd\") " pod="metallb-system/speaker-cz2xh" Nov 21 15:51:23 crc kubenswrapper[4967]: I1121 15:51:23.275111 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0007ae58-10dd-45bc-85cb-2a74a5cca4e5-metrics-certs\") pod \"controller-6c7b4b5f48-x7rr6\" (UID: \"0007ae58-10dd-45bc-85cb-2a74a5cca4e5\") " pod="metallb-system/controller-6c7b4b5f48-x7rr6" Nov 21 15:51:23 crc kubenswrapper[4967]: E1121 15:51:23.275176 4967 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 21 15:51:23 crc kubenswrapper[4967]: E1121 15:51:23.275263 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/52ab67e9-1c78-497d-aa00-18a29052c0bd-memberlist podName:52ab67e9-1c78-497d-aa00-18a29052c0bd nodeName:}" failed. No retries permitted until 2025-11-21 15:51:24.27524003 +0000 UTC m=+972.533761118 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/52ab67e9-1c78-497d-aa00-18a29052c0bd-memberlist") pod "speaker-cz2xh" (UID: "52ab67e9-1c78-497d-aa00-18a29052c0bd") : secret "metallb-memberlist" not found Nov 21 15:51:23 crc kubenswrapper[4967]: I1121 15:51:23.279421 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/52ab67e9-1c78-497d-aa00-18a29052c0bd-metrics-certs\") pod \"speaker-cz2xh\" (UID: \"52ab67e9-1c78-497d-aa00-18a29052c0bd\") " pod="metallb-system/speaker-cz2xh" Nov 21 15:51:23 crc kubenswrapper[4967]: I1121 15:51:23.280897 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0007ae58-10dd-45bc-85cb-2a74a5cca4e5-metrics-certs\") pod \"controller-6c7b4b5f48-x7rr6\" (UID: \"0007ae58-10dd-45bc-85cb-2a74a5cca4e5\") " pod="metallb-system/controller-6c7b4b5f48-x7rr6" Nov 21 15:51:23 crc kubenswrapper[4967]: I1121 15:51:23.365756 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-knjxz" event={"ID":"f9285829-036c-4010-b85e-6fcec9f6ce0e","Type":"ContainerStarted","Data":"fee3b5209e7f737664771ceb74bfebf4e9d4c00563e63cf3d8050eae8da1e789"} Nov 21 15:51:23 crc kubenswrapper[4967]: I1121 15:51:23.418710 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-wzh6s" Nov 21 15:51:23 crc kubenswrapper[4967]: I1121 15:51:23.550842 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-x7rr6" Nov 21 15:51:23 crc kubenswrapper[4967]: I1121 15:51:23.786035 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-x7rr6"] Nov 21 15:51:23 crc kubenswrapper[4967]: I1121 15:51:23.843186 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-wzh6s"] Nov 21 15:51:24 crc kubenswrapper[4967]: I1121 15:51:24.304407 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/52ab67e9-1c78-497d-aa00-18a29052c0bd-memberlist\") pod \"speaker-cz2xh\" (UID: \"52ab67e9-1c78-497d-aa00-18a29052c0bd\") " pod="metallb-system/speaker-cz2xh" Nov 21 15:51:24 crc kubenswrapper[4967]: E1121 15:51:24.305008 4967 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 21 15:51:24 crc kubenswrapper[4967]: E1121 15:51:24.305078 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/52ab67e9-1c78-497d-aa00-18a29052c0bd-memberlist podName:52ab67e9-1c78-497d-aa00-18a29052c0bd nodeName:}" failed. No retries permitted until 2025-11-21 15:51:26.305058728 +0000 UTC m=+974.563579736 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/52ab67e9-1c78-497d-aa00-18a29052c0bd-memberlist") pod "speaker-cz2xh" (UID: "52ab67e9-1c78-497d-aa00-18a29052c0bd") : secret "metallb-memberlist" not found Nov 21 15:51:24 crc kubenswrapper[4967]: I1121 15:51:24.372767 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-x7rr6" event={"ID":"0007ae58-10dd-45bc-85cb-2a74a5cca4e5","Type":"ContainerStarted","Data":"69906e96dd61c1479455a242ccf2de4d646a1fcd3eee99264bb0a9986a1d77a1"} Nov 21 15:51:24 crc kubenswrapper[4967]: I1121 15:51:24.372828 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-x7rr6" event={"ID":"0007ae58-10dd-45bc-85cb-2a74a5cca4e5","Type":"ContainerStarted","Data":"62a1e157e727216e4d98ea89a2b44555afd769a1b48fd0d15c50b77474f1f6a8"} Nov 21 15:51:24 crc kubenswrapper[4967]: I1121 15:51:24.374661 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-wzh6s" event={"ID":"1fa4e173-1be2-4f7d-82e8-d607e1481bcd","Type":"ContainerStarted","Data":"110c054f550b453ba0c5026b327011a9b354d6d7581b557992eee0172c631677"} Nov 21 15:51:25 crc kubenswrapper[4967]: I1121 15:51:25.382485 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-x7rr6" event={"ID":"0007ae58-10dd-45bc-85cb-2a74a5cca4e5","Type":"ContainerStarted","Data":"5fc2c8db6446d77d0abe1fb44db60808d432bb2a319b2e1aaa2d0813859d53f6"} Nov 21 15:51:26 crc kubenswrapper[4967]: I1121 15:51:26.342431 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/52ab67e9-1c78-497d-aa00-18a29052c0bd-memberlist\") pod \"speaker-cz2xh\" (UID: \"52ab67e9-1c78-497d-aa00-18a29052c0bd\") " pod="metallb-system/speaker-cz2xh" Nov 21 15:51:26 crc kubenswrapper[4967]: I1121 15:51:26.349008 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/52ab67e9-1c78-497d-aa00-18a29052c0bd-memberlist\") pod \"speaker-cz2xh\" (UID: \"52ab67e9-1c78-497d-aa00-18a29052c0bd\") " pod="metallb-system/speaker-cz2xh" Nov 21 15:51:26 crc kubenswrapper[4967]: I1121 15:51:26.396442 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-x7rr6" Nov 21 15:51:26 crc kubenswrapper[4967]: I1121 15:51:26.424200 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-x7rr6" podStartSLOduration=4.4241818760000005 podStartE2EDuration="4.424181876s" podCreationTimestamp="2025-11-21 15:51:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:51:26.419418539 +0000 UTC m=+974.677939557" watchObservedRunningTime="2025-11-21 15:51:26.424181876 +0000 UTC m=+974.682702884" Nov 21 15:51:26 crc kubenswrapper[4967]: I1121 15:51:26.522165 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-cz2xh" Nov 21 15:51:27 crc kubenswrapper[4967]: I1121 15:51:27.404414 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-cz2xh" event={"ID":"52ab67e9-1c78-497d-aa00-18a29052c0bd","Type":"ContainerStarted","Data":"2d08bdfa6e29db37ccabf572206390848a9c3566555606503e131559767a2733"} Nov 21 15:51:27 crc kubenswrapper[4967]: I1121 15:51:27.404781 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-cz2xh" event={"ID":"52ab67e9-1c78-497d-aa00-18a29052c0bd","Type":"ContainerStarted","Data":"57a5d26261c99119e02b514c9fd1142d61e1a4a7de5ea1856e4ddde6e2bb8fd7"} Nov 21 15:51:28 crc kubenswrapper[4967]: I1121 15:51:28.413894 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-cz2xh" event={"ID":"52ab67e9-1c78-497d-aa00-18a29052c0bd","Type":"ContainerStarted","Data":"346e43dc83290651507daa353558181d8b8615d78d94bf9f0df1e81b3dcaa9c0"} Nov 21 15:51:28 crc kubenswrapper[4967]: I1121 15:51:28.414045 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-cz2xh" Nov 21 15:51:28 crc kubenswrapper[4967]: I1121 15:51:28.437482 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-cz2xh" podStartSLOduration=6.437458061 podStartE2EDuration="6.437458061s" podCreationTimestamp="2025-11-21 15:51:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:51:28.429690638 +0000 UTC m=+976.688211646" watchObservedRunningTime="2025-11-21 15:51:28.437458061 +0000 UTC m=+976.695979069" Nov 21 15:51:36 crc kubenswrapper[4967]: I1121 15:51:36.470725 4967 generic.go:334] "Generic (PLEG): container finished" podID="f9285829-036c-4010-b85e-6fcec9f6ce0e" containerID="dba201e91a96ae770f586b6997ad55781e56b9e01160b386673493518c5b26a0" exitCode=0 Nov 21 15:51:36 crc kubenswrapper[4967]: I1121 15:51:36.470784 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-knjxz" event={"ID":"f9285829-036c-4010-b85e-6fcec9f6ce0e","Type":"ContainerDied","Data":"dba201e91a96ae770f586b6997ad55781e56b9e01160b386673493518c5b26a0"} Nov 21 15:51:36 crc kubenswrapper[4967]: I1121 15:51:36.473827 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-wzh6s" event={"ID":"1fa4e173-1be2-4f7d-82e8-d607e1481bcd","Type":"ContainerStarted","Data":"73dfd92520762d5bf4c8fb95083446462cb0aad665acd8f52d525a77c197991d"} Nov 21 15:51:36 crc kubenswrapper[4967]: I1121 15:51:36.474289 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-wzh6s" Nov 21 15:51:36 crc kubenswrapper[4967]: I1121 15:51:36.514708 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-wzh6s" podStartSLOduration=2.739591926 podStartE2EDuration="14.514681953s" podCreationTimestamp="2025-11-21 15:51:22 +0000 UTC" firstStartedPulling="2025-11-21 15:51:23.853813557 +0000 UTC m=+972.112334565" lastFinishedPulling="2025-11-21 15:51:35.628903584 +0000 UTC m=+983.887424592" observedRunningTime="2025-11-21 15:51:36.511025678 +0000 UTC m=+984.769546726" watchObservedRunningTime="2025-11-21 15:51:36.514681953 +0000 UTC m=+984.773202981" Nov 21 15:51:36 crc kubenswrapper[4967]: I1121 15:51:36.525349 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="metallb-system/speaker-cz2xh" Nov 21 15:51:37 crc kubenswrapper[4967]: I1121 15:51:37.494987 4967 generic.go:334] "Generic (PLEG): container finished" podID="f9285829-036c-4010-b85e-6fcec9f6ce0e" containerID="81232280a9c46591c8c2887ef790989a539cc947e28895a4d68fad729b16c744" exitCode=0 Nov 21 15:51:37 crc kubenswrapper[4967]: I1121 15:51:37.495184 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-knjxz" event={"ID":"f9285829-036c-4010-b85e-6fcec9f6ce0e","Type":"ContainerDied","Data":"81232280a9c46591c8c2887ef790989a539cc947e28895a4d68fad729b16c744"} Nov 21 15:51:38 crc kubenswrapper[4967]: I1121 15:51:38.504448 4967 generic.go:334] "Generic (PLEG): container finished" podID="f9285829-036c-4010-b85e-6fcec9f6ce0e" containerID="d1a2259f020ee1c7b209996b1664e5db00432e1db908005c40f7a414f5a2b6a2" exitCode=0 Nov 21 15:51:38 crc kubenswrapper[4967]: I1121 15:51:38.504530 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-knjxz" event={"ID":"f9285829-036c-4010-b85e-6fcec9f6ce0e","Type":"ContainerDied","Data":"d1a2259f020ee1c7b209996b1664e5db00432e1db908005c40f7a414f5a2b6a2"} Nov 21 15:51:39 crc kubenswrapper[4967]: I1121 15:51:39.262979 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-jv99k"] Nov 21 15:51:39 crc kubenswrapper[4967]: I1121 15:51:39.264503 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-jv99k" Nov 21 15:51:39 crc kubenswrapper[4967]: I1121 15:51:39.269521 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-6mw8x" Nov 21 15:51:39 crc kubenswrapper[4967]: I1121 15:51:39.269628 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 21 15:51:39 crc kubenswrapper[4967]: I1121 15:51:39.269708 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 21 15:51:39 crc kubenswrapper[4967]: I1121 15:51:39.288189 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-jv99k"] Nov 21 15:51:39 crc kubenswrapper[4967]: I1121 15:51:39.405498 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwdkf\" (UniqueName: \"kubernetes.io/projected/d234a5e6-8be8-4fdb-95af-fb2cf579066c-kube-api-access-rwdkf\") pod \"openstack-operator-index-jv99k\" (UID: \"d234a5e6-8be8-4fdb-95af-fb2cf579066c\") " pod="openstack-operators/openstack-operator-index-jv99k" Nov 21 15:51:39 crc kubenswrapper[4967]: I1121 15:51:39.506708 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwdkf\" (UniqueName: \"kubernetes.io/projected/d234a5e6-8be8-4fdb-95af-fb2cf579066c-kube-api-access-rwdkf\") pod \"openstack-operator-index-jv99k\" (UID: \"d234a5e6-8be8-4fdb-95af-fb2cf579066c\") " pod="openstack-operators/openstack-operator-index-jv99k" Nov 21 15:51:39 crc kubenswrapper[4967]: I1121 15:51:39.520268 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-knjxz" event={"ID":"f9285829-036c-4010-b85e-6fcec9f6ce0e","Type":"ContainerStarted","Data":"2ab193f7a9dacab1fc7844c6d8acd0130ef89503809a5124989e8e3a2ad8ab23"} Nov 21 15:51:39 crc kubenswrapper[4967]: I1121 15:51:39.520357 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="metallb-system/frr-k8s-knjxz" event={"ID":"f9285829-036c-4010-b85e-6fcec9f6ce0e","Type":"ContainerStarted","Data":"f29d4759cdbafdf8984f32af092cf95cb3344fb83cacc0938de0e78f05f37f43"} Nov 21 15:51:39 crc kubenswrapper[4967]: I1121 15:51:39.520369 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-knjxz" event={"ID":"f9285829-036c-4010-b85e-6fcec9f6ce0e","Type":"ContainerStarted","Data":"08a11ecdc20c168f4761b3b22a7ab2b507b317d33522e61039e37339ec134da1"} Nov 21 15:51:39 crc kubenswrapper[4967]: I1121 15:51:39.520380 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-knjxz" event={"ID":"f9285829-036c-4010-b85e-6fcec9f6ce0e","Type":"ContainerStarted","Data":"f749de6491eef7bc3fed0c07fa535d23ffb25f0becc189d0d38b342cb6e5e79c"} Nov 21 15:51:39 crc kubenswrapper[4967]: I1121 15:51:39.536570 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwdkf\" (UniqueName: \"kubernetes.io/projected/d234a5e6-8be8-4fdb-95af-fb2cf579066c-kube-api-access-rwdkf\") pod \"openstack-operator-index-jv99k\" (UID: \"d234a5e6-8be8-4fdb-95af-fb2cf579066c\") " pod="openstack-operators/openstack-operator-index-jv99k" Nov 21 15:51:39 crc kubenswrapper[4967]: I1121 15:51:39.588998 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-jv99k" Nov 21 15:51:40 crc kubenswrapper[4967]: I1121 15:51:40.025683 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-jv99k"] Nov 21 15:51:40 crc kubenswrapper[4967]: W1121 15:51:40.050156 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd234a5e6_8be8_4fdb_95af_fb2cf579066c.slice/crio-dcb8ba375538fba7e88205692fa4a4e3e83d0ff489a0adeb411b725e787239ff WatchSource:0}: Error finding container dcb8ba375538fba7e88205692fa4a4e3e83d0ff489a0adeb411b725e787239ff: Status 404 returned error can't find the container with id dcb8ba375538fba7e88205692fa4a4e3e83d0ff489a0adeb411b725e787239ff Nov 21 15:51:40 crc kubenswrapper[4967]: I1121 15:51:40.553540 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-knjxz" event={"ID":"f9285829-036c-4010-b85e-6fcec9f6ce0e","Type":"ContainerStarted","Data":"67e5a418dff9a54cb3d2109841bfb9636608e614df897c669b77027826be2f91"} Nov 21 15:51:40 crc kubenswrapper[4967]: I1121 15:51:40.553595 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-knjxz" event={"ID":"f9285829-036c-4010-b85e-6fcec9f6ce0e","Type":"ContainerStarted","Data":"d351a8a1c22b1bb5dc9aa99142d15e83c2552c8c552f29ef43c7935b7379f558"} Nov 21 15:51:40 crc kubenswrapper[4967]: I1121 15:51:40.553609 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-jv99k" event={"ID":"d234a5e6-8be8-4fdb-95af-fb2cf579066c","Type":"ContainerStarted","Data":"dcb8ba375538fba7e88205692fa4a4e3e83d0ff489a0adeb411b725e787239ff"} Nov 21 15:51:40 crc kubenswrapper[4967]: I1121 15:51:40.576712 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-knjxz" podStartSLOduration=6.254141161 podStartE2EDuration="18.576692582s" podCreationTimestamp="2025-11-21 15:51:22 +0000 UTC" firstStartedPulling="2025-11-21 15:51:23.281030646 +0000 UTC m=+971.539551654" lastFinishedPulling="2025-11-21 15:51:35.603582067 +0000 UTC m=+983.862103075" observedRunningTime="2025-11-21 15:51:40.570469213 
+0000 UTC m=+988.828990231" watchObservedRunningTime="2025-11-21 15:51:40.576692582 +0000 UTC m=+988.835213590" Nov 21 15:51:41 crc kubenswrapper[4967]: I1121 15:51:41.554626 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:42 crc kubenswrapper[4967]: I1121 15:51:42.629583 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-jv99k"] Nov 21 15:51:42 crc kubenswrapper[4967]: I1121 15:51:42.814746 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:42 crc kubenswrapper[4967]: I1121 15:51:42.851840 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:43 crc kubenswrapper[4967]: I1121 15:51:43.242289 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-bq7fn"] Nov 21 15:51:43 crc kubenswrapper[4967]: I1121 15:51:43.243582 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-bq7fn" Nov 21 15:51:43 crc kubenswrapper[4967]: I1121 15:51:43.249131 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-bq7fn"] Nov 21 15:51:43 crc kubenswrapper[4967]: I1121 15:51:43.375224 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srnvw\" (UniqueName: \"kubernetes.io/projected/f5c26992-d25c-48e3-97f2-4260d4489c53-kube-api-access-srnvw\") pod \"openstack-operator-index-bq7fn\" (UID: \"f5c26992-d25c-48e3-97f2-4260d4489c53\") " pod="openstack-operators/openstack-operator-index-bq7fn" Nov 21 15:51:43 crc kubenswrapper[4967]: I1121 15:51:43.477733 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srnvw\" (UniqueName: \"kubernetes.io/projected/f5c26992-d25c-48e3-97f2-4260d4489c53-kube-api-access-srnvw\") pod \"openstack-operator-index-bq7fn\" (UID: \"f5c26992-d25c-48e3-97f2-4260d4489c53\") " pod="openstack-operators/openstack-operator-index-bq7fn" Nov 21 15:51:43 crc kubenswrapper[4967]: I1121 15:51:43.498915 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srnvw\" (UniqueName: \"kubernetes.io/projected/f5c26992-d25c-48e3-97f2-4260d4489c53-kube-api-access-srnvw\") pod \"openstack-operator-index-bq7fn\" (UID: \"f5c26992-d25c-48e3-97f2-4260d4489c53\") " pod="openstack-operators/openstack-operator-index-bq7fn" Nov 21 15:51:43 crc kubenswrapper[4967]: I1121 15:51:43.558764 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-x7rr6" Nov 21 15:51:43 crc kubenswrapper[4967]: I1121 15:51:43.572618 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-bq7fn" Nov 21 15:51:44 crc kubenswrapper[4967]: I1121 15:51:44.020674 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-bq7fn"] Nov 21 15:51:44 crc kubenswrapper[4967]: W1121 15:51:44.031516 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf5c26992_d25c_48e3_97f2_4260d4489c53.slice/crio-da9b74d8ca2accf8d5b3e107aa5d3dfa2c6fe6c58cf10958ed49a627450474a0 WatchSource:0}: Error finding container da9b74d8ca2accf8d5b3e107aa5d3dfa2c6fe6c58cf10958ed49a627450474a0: Status 404 returned error can't find the container with id da9b74d8ca2accf8d5b3e107aa5d3dfa2c6fe6c58cf10958ed49a627450474a0 Nov 21 15:51:44 crc kubenswrapper[4967]: I1121 15:51:44.580001 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-bq7fn" event={"ID":"f5c26992-d25c-48e3-97f2-4260d4489c53","Type":"ContainerStarted","Data":"da9b74d8ca2accf8d5b3e107aa5d3dfa2c6fe6c58cf10958ed49a627450474a0"} Nov 21 15:51:46 crc kubenswrapper[4967]: I1121 15:51:46.522549 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:51:46 crc kubenswrapper[4967]: I1121 15:51:46.523194 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:51:46 crc kubenswrapper[4967]: I1121 15:51:46.523256 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 15:51:46 crc kubenswrapper[4967]: I1121 15:51:46.524049 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1d65d5683a0677cbfe6b7ae1700b31a63febdef0c20d9c6546a7663875980c24"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 15:51:46 crc kubenswrapper[4967]: I1121 15:51:46.524112 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://1d65d5683a0677cbfe6b7ae1700b31a63febdef0c20d9c6546a7663875980c24" gracePeriod=600 Nov 21 15:51:47 crc kubenswrapper[4967]: I1121 15:51:47.604188 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-bq7fn" event={"ID":"f5c26992-d25c-48e3-97f2-4260d4489c53","Type":"ContainerStarted","Data":"459acf5d0e1b20dc481979261e9a4b47d300bb2899c2e65264ec607e16d423bd"} Nov 21 15:51:47 crc kubenswrapper[4967]: I1121 15:51:47.608361 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="1d65d5683a0677cbfe6b7ae1700b31a63febdef0c20d9c6546a7663875980c24" exitCode=0 Nov 21 15:51:47 crc kubenswrapper[4967]: I1121 15:51:47.608431 
4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"1d65d5683a0677cbfe6b7ae1700b31a63febdef0c20d9c6546a7663875980c24"} Nov 21 15:51:47 crc kubenswrapper[4967]: I1121 15:51:47.608712 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"d0c41fa7ce71cf310016f53428786fb104a174849e89edc64a61d157cdf085ba"} Nov 21 15:51:47 crc kubenswrapper[4967]: I1121 15:51:47.608734 4967 scope.go:117] "RemoveContainer" containerID="1d2361c88740d2f7915ba1040ce6a3af440c8b0cf78d9c3df36a544e55b9adc8" Nov 21 15:51:47 crc kubenswrapper[4967]: I1121 15:51:47.611548 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-jv99k" event={"ID":"d234a5e6-8be8-4fdb-95af-fb2cf579066c","Type":"ContainerStarted","Data":"7e5a501f00700ce9b175099e20ca543d7ac11bae4dfe57d7ba221ceee36efc35"} Nov 21 15:51:47 crc kubenswrapper[4967]: I1121 15:51:47.611642 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-jv99k" podUID="d234a5e6-8be8-4fdb-95af-fb2cf579066c" containerName="registry-server" containerID="cri-o://7e5a501f00700ce9b175099e20ca543d7ac11bae4dfe57d7ba221ceee36efc35" gracePeriod=2 Nov 21 15:51:47 crc kubenswrapper[4967]: I1121 15:51:47.626026 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-bq7fn" podStartSLOduration=1.751367794 podStartE2EDuration="4.626001178s" podCreationTimestamp="2025-11-21 15:51:43 +0000 UTC" firstStartedPulling="2025-11-21 15:51:44.033830522 +0000 UTC m=+992.292351530" lastFinishedPulling="2025-11-21 15:51:46.908463896 +0000 UTC m=+995.166984914" observedRunningTime="2025-11-21 15:51:47.617383121 +0000 UTC m=+995.875904129" watchObservedRunningTime="2025-11-21 15:51:47.626001178 +0000 UTC m=+995.884522186" Nov 21 15:51:47 crc kubenswrapper[4967]: I1121 15:51:47.635893 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-jv99k" podStartSLOduration=1.765968894 podStartE2EDuration="8.635875941s" podCreationTimestamp="2025-11-21 15:51:39 +0000 UTC" firstStartedPulling="2025-11-21 15:51:40.054161017 +0000 UTC m=+988.312682025" lastFinishedPulling="2025-11-21 15:51:46.924068064 +0000 UTC m=+995.182589072" observedRunningTime="2025-11-21 15:51:47.630753294 +0000 UTC m=+995.889274292" watchObservedRunningTime="2025-11-21 15:51:47.635875941 +0000 UTC m=+995.894396969" Nov 21 15:51:48 crc kubenswrapper[4967]: I1121 15:51:48.036629 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-jv99k" Nov 21 15:51:48 crc kubenswrapper[4967]: I1121 15:51:48.156363 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rwdkf\" (UniqueName: \"kubernetes.io/projected/d234a5e6-8be8-4fdb-95af-fb2cf579066c-kube-api-access-rwdkf\") pod \"d234a5e6-8be8-4fdb-95af-fb2cf579066c\" (UID: \"d234a5e6-8be8-4fdb-95af-fb2cf579066c\") " Nov 21 15:51:48 crc kubenswrapper[4967]: I1121 15:51:48.162349 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d234a5e6-8be8-4fdb-95af-fb2cf579066c-kube-api-access-rwdkf" (OuterVolumeSpecName: "kube-api-access-rwdkf") pod "d234a5e6-8be8-4fdb-95af-fb2cf579066c" (UID: "d234a5e6-8be8-4fdb-95af-fb2cf579066c"). InnerVolumeSpecName "kube-api-access-rwdkf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:51:48 crc kubenswrapper[4967]: I1121 15:51:48.258457 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rwdkf\" (UniqueName: \"kubernetes.io/projected/d234a5e6-8be8-4fdb-95af-fb2cf579066c-kube-api-access-rwdkf\") on node \"crc\" DevicePath \"\"" Nov 21 15:51:48 crc kubenswrapper[4967]: I1121 15:51:48.619860 4967 generic.go:334] "Generic (PLEG): container finished" podID="d234a5e6-8be8-4fdb-95af-fb2cf579066c" containerID="7e5a501f00700ce9b175099e20ca543d7ac11bae4dfe57d7ba221ceee36efc35" exitCode=0 Nov 21 15:51:48 crc kubenswrapper[4967]: I1121 15:51:48.620178 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-jv99k" event={"ID":"d234a5e6-8be8-4fdb-95af-fb2cf579066c","Type":"ContainerDied","Data":"7e5a501f00700ce9b175099e20ca543d7ac11bae4dfe57d7ba221ceee36efc35"} Nov 21 15:51:48 crc kubenswrapper[4967]: I1121 15:51:48.620212 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-jv99k" event={"ID":"d234a5e6-8be8-4fdb-95af-fb2cf579066c","Type":"ContainerDied","Data":"dcb8ba375538fba7e88205692fa4a4e3e83d0ff489a0adeb411b725e787239ff"} Nov 21 15:51:48 crc kubenswrapper[4967]: I1121 15:51:48.620230 4967 scope.go:117] "RemoveContainer" containerID="7e5a501f00700ce9b175099e20ca543d7ac11bae4dfe57d7ba221ceee36efc35" Nov 21 15:51:48 crc kubenswrapper[4967]: I1121 15:51:48.620330 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-jv99k" Nov 21 15:51:48 crc kubenswrapper[4967]: I1121 15:51:48.640030 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-jv99k"] Nov 21 15:51:48 crc kubenswrapper[4967]: I1121 15:51:48.643051 4967 scope.go:117] "RemoveContainer" containerID="7e5a501f00700ce9b175099e20ca543d7ac11bae4dfe57d7ba221ceee36efc35" Nov 21 15:51:48 crc kubenswrapper[4967]: E1121 15:51:48.643540 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e5a501f00700ce9b175099e20ca543d7ac11bae4dfe57d7ba221ceee36efc35\": container with ID starting with 7e5a501f00700ce9b175099e20ca543d7ac11bae4dfe57d7ba221ceee36efc35 not found: ID does not exist" containerID="7e5a501f00700ce9b175099e20ca543d7ac11bae4dfe57d7ba221ceee36efc35" Nov 21 15:51:48 crc kubenswrapper[4967]: I1121 15:51:48.643580 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e5a501f00700ce9b175099e20ca543d7ac11bae4dfe57d7ba221ceee36efc35"} err="failed to get container status \"7e5a501f00700ce9b175099e20ca543d7ac11bae4dfe57d7ba221ceee36efc35\": rpc error: code = NotFound desc = could not find container \"7e5a501f00700ce9b175099e20ca543d7ac11bae4dfe57d7ba221ceee36efc35\": container with ID starting with 7e5a501f00700ce9b175099e20ca543d7ac11bae4dfe57d7ba221ceee36efc35 not found: ID does not exist" Nov 21 15:51:48 crc kubenswrapper[4967]: I1121 15:51:48.645717 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-jv99k"] Nov 21 15:51:50 crc kubenswrapper[4967]: I1121 15:51:50.546182 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d234a5e6-8be8-4fdb-95af-fb2cf579066c" path="/var/lib/kubelet/pods/d234a5e6-8be8-4fdb-95af-fb2cf579066c/volumes" Nov 21 15:51:52 crc kubenswrapper[4967]: I1121 15:51:52.817398 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-knjxz" Nov 21 15:51:53 crc kubenswrapper[4967]: I1121 15:51:53.425351 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-wzh6s" Nov 21 15:51:53 crc kubenswrapper[4967]: I1121 15:51:53.572929 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-bq7fn" Nov 21 15:51:53 crc kubenswrapper[4967]: I1121 15:51:53.573324 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-bq7fn" Nov 21 15:51:53 crc kubenswrapper[4967]: I1121 15:51:53.606889 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-bq7fn" Nov 21 15:51:53 crc kubenswrapper[4967]: I1121 15:51:53.682381 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-bq7fn" Nov 21 15:51:55 crc kubenswrapper[4967]: I1121 15:51:55.668400 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc"] Nov 21 15:51:55 crc kubenswrapper[4967]: E1121 15:51:55.669246 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d234a5e6-8be8-4fdb-95af-fb2cf579066c" containerName="registry-server" Nov 21 15:51:55 crc kubenswrapper[4967]: I1121 15:51:55.669262 4967 
state_mem.go:107] "Deleted CPUSet assignment" podUID="d234a5e6-8be8-4fdb-95af-fb2cf579066c" containerName="registry-server" Nov 21 15:51:55 crc kubenswrapper[4967]: I1121 15:51:55.669458 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="d234a5e6-8be8-4fdb-95af-fb2cf579066c" containerName="registry-server" Nov 21 15:51:55 crc kubenswrapper[4967]: I1121 15:51:55.670761 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc" Nov 21 15:51:55 crc kubenswrapper[4967]: I1121 15:51:55.673137 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-h9rwz" Nov 21 15:51:55 crc kubenswrapper[4967]: I1121 15:51:55.679567 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc"] Nov 21 15:51:55 crc kubenswrapper[4967]: I1121 15:51:55.788594 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/721e9c44-afb3-47fb-979b-10e52d1c5acc-util\") pod \"82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc\" (UID: \"721e9c44-afb3-47fb-979b-10e52d1c5acc\") " pod="openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc" Nov 21 15:51:55 crc kubenswrapper[4967]: I1121 15:51:55.788658 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/721e9c44-afb3-47fb-979b-10e52d1c5acc-bundle\") pod \"82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc\" (UID: \"721e9c44-afb3-47fb-979b-10e52d1c5acc\") " pod="openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc" Nov 21 15:51:55 crc kubenswrapper[4967]: I1121 15:51:55.788821 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcb6z\" (UniqueName: \"kubernetes.io/projected/721e9c44-afb3-47fb-979b-10e52d1c5acc-kube-api-access-qcb6z\") pod \"82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc\" (UID: \"721e9c44-afb3-47fb-979b-10e52d1c5acc\") " pod="openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc" Nov 21 15:51:55 crc kubenswrapper[4967]: I1121 15:51:55.890489 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qcb6z\" (UniqueName: \"kubernetes.io/projected/721e9c44-afb3-47fb-979b-10e52d1c5acc-kube-api-access-qcb6z\") pod \"82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc\" (UID: \"721e9c44-afb3-47fb-979b-10e52d1c5acc\") " pod="openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc" Nov 21 15:51:55 crc kubenswrapper[4967]: I1121 15:51:55.890820 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/721e9c44-afb3-47fb-979b-10e52d1c5acc-util\") pod \"82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc\" (UID: \"721e9c44-afb3-47fb-979b-10e52d1c5acc\") " pod="openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc" Nov 21 15:51:55 crc kubenswrapper[4967]: I1121 15:51:55.890916 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/721e9c44-afb3-47fb-979b-10e52d1c5acc-bundle\") pod \"82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc\" (UID: \"721e9c44-afb3-47fb-979b-10e52d1c5acc\") " pod="openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc" Nov 21 15:51:55 crc kubenswrapper[4967]: I1121 15:51:55.891278 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/721e9c44-afb3-47fb-979b-10e52d1c5acc-util\") pod \"82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc\" (UID: \"721e9c44-afb3-47fb-979b-10e52d1c5acc\") " pod="openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc" Nov 21 15:51:55 crc kubenswrapper[4967]: I1121 15:51:55.891437 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/721e9c44-afb3-47fb-979b-10e52d1c5acc-bundle\") pod \"82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc\" (UID: \"721e9c44-afb3-47fb-979b-10e52d1c5acc\") " pod="openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc" Nov 21 15:51:55 crc kubenswrapper[4967]: I1121 15:51:55.909843 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcb6z\" (UniqueName: \"kubernetes.io/projected/721e9c44-afb3-47fb-979b-10e52d1c5acc-kube-api-access-qcb6z\") pod \"82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc\" (UID: \"721e9c44-afb3-47fb-979b-10e52d1c5acc\") " pod="openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc" Nov 21 15:51:55 crc kubenswrapper[4967]: I1121 15:51:55.985621 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc" Nov 21 15:51:56 crc kubenswrapper[4967]: I1121 15:51:56.363683 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc"] Nov 21 15:51:56 crc kubenswrapper[4967]: W1121 15:51:56.368407 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod721e9c44_afb3_47fb_979b_10e52d1c5acc.slice/crio-f05cd8f96cb683418e33bf371d83c7d8467b7679f0feb324463ab0d7ffa9e20f WatchSource:0}: Error finding container f05cd8f96cb683418e33bf371d83c7d8467b7679f0feb324463ab0d7ffa9e20f: Status 404 returned error can't find the container with id f05cd8f96cb683418e33bf371d83c7d8467b7679f0feb324463ab0d7ffa9e20f Nov 21 15:51:56 crc kubenswrapper[4967]: I1121 15:51:56.681410 4967 generic.go:334] "Generic (PLEG): container finished" podID="721e9c44-afb3-47fb-979b-10e52d1c5acc" containerID="94ea3681b21addff0d2a04ee424cfa1888942f9c692e8a965eac79cc7c54fb60" exitCode=0 Nov 21 15:51:56 crc kubenswrapper[4967]: I1121 15:51:56.681674 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc" event={"ID":"721e9c44-afb3-47fb-979b-10e52d1c5acc","Type":"ContainerDied","Data":"94ea3681b21addff0d2a04ee424cfa1888942f9c692e8a965eac79cc7c54fb60"} Nov 21 15:51:56 crc kubenswrapper[4967]: I1121 15:51:56.681699 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc" 
event={"ID":"721e9c44-afb3-47fb-979b-10e52d1c5acc","Type":"ContainerStarted","Data":"f05cd8f96cb683418e33bf371d83c7d8467b7679f0feb324463ab0d7ffa9e20f"} Nov 21 15:51:57 crc kubenswrapper[4967]: I1121 15:51:57.690556 4967 generic.go:334] "Generic (PLEG): container finished" podID="721e9c44-afb3-47fb-979b-10e52d1c5acc" containerID="ec4165dfe830f8c503873726076d2fe3587e19a077b9fb6727cabccec83fa58e" exitCode=0 Nov 21 15:51:57 crc kubenswrapper[4967]: I1121 15:51:57.690678 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc" event={"ID":"721e9c44-afb3-47fb-979b-10e52d1c5acc","Type":"ContainerDied","Data":"ec4165dfe830f8c503873726076d2fe3587e19a077b9fb6727cabccec83fa58e"} Nov 21 15:51:58 crc kubenswrapper[4967]: I1121 15:51:58.706712 4967 generic.go:334] "Generic (PLEG): container finished" podID="721e9c44-afb3-47fb-979b-10e52d1c5acc" containerID="3f0732e88f5473489c05c61235d74f64d4fe087196c72af5aa8564cf711d5815" exitCode=0 Nov 21 15:51:58 crc kubenswrapper[4967]: I1121 15:51:58.706831 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc" event={"ID":"721e9c44-afb3-47fb-979b-10e52d1c5acc","Type":"ContainerDied","Data":"3f0732e88f5473489c05c61235d74f64d4fe087196c72af5aa8564cf711d5815"} Nov 21 15:51:59 crc kubenswrapper[4967]: I1121 15:51:59.996902 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc" Nov 21 15:52:00 crc kubenswrapper[4967]: I1121 15:52:00.163026 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/721e9c44-afb3-47fb-979b-10e52d1c5acc-util\") pod \"721e9c44-afb3-47fb-979b-10e52d1c5acc\" (UID: \"721e9c44-afb3-47fb-979b-10e52d1c5acc\") " Nov 21 15:52:00 crc kubenswrapper[4967]: I1121 15:52:00.163089 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qcb6z\" (UniqueName: \"kubernetes.io/projected/721e9c44-afb3-47fb-979b-10e52d1c5acc-kube-api-access-qcb6z\") pod \"721e9c44-afb3-47fb-979b-10e52d1c5acc\" (UID: \"721e9c44-afb3-47fb-979b-10e52d1c5acc\") " Nov 21 15:52:00 crc kubenswrapper[4967]: I1121 15:52:00.163123 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/721e9c44-afb3-47fb-979b-10e52d1c5acc-bundle\") pod \"721e9c44-afb3-47fb-979b-10e52d1c5acc\" (UID: \"721e9c44-afb3-47fb-979b-10e52d1c5acc\") " Nov 21 15:52:00 crc kubenswrapper[4967]: I1121 15:52:00.163894 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/721e9c44-afb3-47fb-979b-10e52d1c5acc-bundle" (OuterVolumeSpecName: "bundle") pod "721e9c44-afb3-47fb-979b-10e52d1c5acc" (UID: "721e9c44-afb3-47fb-979b-10e52d1c5acc"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:52:00 crc kubenswrapper[4967]: I1121 15:52:00.168229 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/721e9c44-afb3-47fb-979b-10e52d1c5acc-kube-api-access-qcb6z" (OuterVolumeSpecName: "kube-api-access-qcb6z") pod "721e9c44-afb3-47fb-979b-10e52d1c5acc" (UID: "721e9c44-afb3-47fb-979b-10e52d1c5acc"). InnerVolumeSpecName "kube-api-access-qcb6z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:52:00 crc kubenswrapper[4967]: I1121 15:52:00.176279 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/721e9c44-afb3-47fb-979b-10e52d1c5acc-util" (OuterVolumeSpecName: "util") pod "721e9c44-afb3-47fb-979b-10e52d1c5acc" (UID: "721e9c44-afb3-47fb-979b-10e52d1c5acc"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:52:00 crc kubenswrapper[4967]: I1121 15:52:00.264933 4967 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/721e9c44-afb3-47fb-979b-10e52d1c5acc-util\") on node \"crc\" DevicePath \"\"" Nov 21 15:52:00 crc kubenswrapper[4967]: I1121 15:52:00.264972 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qcb6z\" (UniqueName: \"kubernetes.io/projected/721e9c44-afb3-47fb-979b-10e52d1c5acc-kube-api-access-qcb6z\") on node \"crc\" DevicePath \"\"" Nov 21 15:52:00 crc kubenswrapper[4967]: I1121 15:52:00.264989 4967 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/721e9c44-afb3-47fb-979b-10e52d1c5acc-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:52:00 crc kubenswrapper[4967]: I1121 15:52:00.726813 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc" event={"ID":"721e9c44-afb3-47fb-979b-10e52d1c5acc","Type":"ContainerDied","Data":"f05cd8f96cb683418e33bf371d83c7d8467b7679f0feb324463ab0d7ffa9e20f"} Nov 21 15:52:00 crc kubenswrapper[4967]: I1121 15:52:00.726853 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f05cd8f96cb683418e33bf371d83c7d8467b7679f0feb324463ab0d7ffa9e20f" Nov 21 15:52:00 crc kubenswrapper[4967]: I1121 15:52:00.726945 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc" Nov 21 15:52:03 crc kubenswrapper[4967]: I1121 15:52:03.172329 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5c6987f5c-tljdg"] Nov 21 15:52:03 crc kubenswrapper[4967]: E1121 15:52:03.173102 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="721e9c44-afb3-47fb-979b-10e52d1c5acc" containerName="util" Nov 21 15:52:03 crc kubenswrapper[4967]: I1121 15:52:03.173118 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="721e9c44-afb3-47fb-979b-10e52d1c5acc" containerName="util" Nov 21 15:52:03 crc kubenswrapper[4967]: E1121 15:52:03.173140 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="721e9c44-afb3-47fb-979b-10e52d1c5acc" containerName="extract" Nov 21 15:52:03 crc kubenswrapper[4967]: I1121 15:52:03.173149 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="721e9c44-afb3-47fb-979b-10e52d1c5acc" containerName="extract" Nov 21 15:52:03 crc kubenswrapper[4967]: E1121 15:52:03.173185 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="721e9c44-afb3-47fb-979b-10e52d1c5acc" containerName="pull" Nov 21 15:52:03 crc kubenswrapper[4967]: I1121 15:52:03.173194 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="721e9c44-afb3-47fb-979b-10e52d1c5acc" containerName="pull" Nov 21 15:52:03 crc kubenswrapper[4967]: I1121 15:52:03.173435 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="721e9c44-afb3-47fb-979b-10e52d1c5acc" containerName="extract" Nov 21 15:52:03 crc kubenswrapper[4967]: I1121 15:52:03.174587 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5c6987f5c-tljdg" Nov 21 15:52:03 crc kubenswrapper[4967]: I1121 15:52:03.176509 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-lchzv" Nov 21 15:52:03 crc kubenswrapper[4967]: I1121 15:52:03.207084 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5c6987f5c-tljdg"] Nov 21 15:52:03 crc kubenswrapper[4967]: I1121 15:52:03.314231 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f269c\" (UniqueName: \"kubernetes.io/projected/073aff39-0ebc-4283-9a05-ec6b8e0abbd5-kube-api-access-f269c\") pod \"openstack-operator-controller-operator-5c6987f5c-tljdg\" (UID: \"073aff39-0ebc-4283-9a05-ec6b8e0abbd5\") " pod="openstack-operators/openstack-operator-controller-operator-5c6987f5c-tljdg" Nov 21 15:52:03 crc kubenswrapper[4967]: I1121 15:52:03.415609 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f269c\" (UniqueName: \"kubernetes.io/projected/073aff39-0ebc-4283-9a05-ec6b8e0abbd5-kube-api-access-f269c\") pod \"openstack-operator-controller-operator-5c6987f5c-tljdg\" (UID: \"073aff39-0ebc-4283-9a05-ec6b8e0abbd5\") " pod="openstack-operators/openstack-operator-controller-operator-5c6987f5c-tljdg" Nov 21 15:52:03 crc kubenswrapper[4967]: I1121 15:52:03.441104 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f269c\" (UniqueName: \"kubernetes.io/projected/073aff39-0ebc-4283-9a05-ec6b8e0abbd5-kube-api-access-f269c\") pod \"openstack-operator-controller-operator-5c6987f5c-tljdg\" (UID: 
\"073aff39-0ebc-4283-9a05-ec6b8e0abbd5\") " pod="openstack-operators/openstack-operator-controller-operator-5c6987f5c-tljdg" Nov 21 15:52:03 crc kubenswrapper[4967]: I1121 15:52:03.510367 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5c6987f5c-tljdg" Nov 21 15:52:03 crc kubenswrapper[4967]: I1121 15:52:03.965678 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5c6987f5c-tljdg"] Nov 21 15:52:04 crc kubenswrapper[4967]: I1121 15:52:04.757498 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5c6987f5c-tljdg" event={"ID":"073aff39-0ebc-4283-9a05-ec6b8e0abbd5","Type":"ContainerStarted","Data":"12da902da7bc3186ff9e21a54dfc852c5c22628d391269957b6e8dc84e4706f3"} Nov 21 15:52:09 crc kubenswrapper[4967]: I1121 15:52:09.804527 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5c6987f5c-tljdg" event={"ID":"073aff39-0ebc-4283-9a05-ec6b8e0abbd5","Type":"ContainerStarted","Data":"e33b1e2e613e3660ae60675b8d39189035dc78947324b1e95ab41abd9e703b90"} Nov 21 15:52:11 crc kubenswrapper[4967]: I1121 15:52:11.819570 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5c6987f5c-tljdg" event={"ID":"073aff39-0ebc-4283-9a05-ec6b8e0abbd5","Type":"ContainerStarted","Data":"e58a091f1ec46e1a58fc490f1477458b11d5007e21cd13a08f34eb29e23f7745"} Nov 21 15:52:11 crc kubenswrapper[4967]: I1121 15:52:11.820110 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-5c6987f5c-tljdg" Nov 21 15:52:11 crc kubenswrapper[4967]: I1121 15:52:11.851285 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-5c6987f5c-tljdg" podStartSLOduration=2.091548452 podStartE2EDuration="8.851263485s" podCreationTimestamp="2025-11-21 15:52:03 +0000 UTC" firstStartedPulling="2025-11-21 15:52:03.976490363 +0000 UTC m=+1012.235011391" lastFinishedPulling="2025-11-21 15:52:10.736205416 +0000 UTC m=+1018.994726424" observedRunningTime="2025-11-21 15:52:11.844856191 +0000 UTC m=+1020.103377219" watchObservedRunningTime="2025-11-21 15:52:11.851263485 +0000 UTC m=+1020.109784493" Nov 21 15:52:13 crc kubenswrapper[4967]: I1121 15:52:13.514722 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-5c6987f5c-tljdg" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.605213 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-75fb479bcc-pmb82"] Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.607549 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-pmb82" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.634017 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-49g9d" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.654900 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6498cbf48f-65jcd"] Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.659163 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-65jcd" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.668621 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-6nvk4" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.675481 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-75fb479bcc-pmb82"] Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.705077 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6498cbf48f-65jcd"] Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.716015 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-767ccfd65f-c8rct"] Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.720983 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-c8rct" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.723240 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-b9jjv" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.723437 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-7969689c84-lrlvb"] Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.724748 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-7969689c84-lrlvb" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.737367 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-hdd76" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.782002 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-7969689c84-lrlvb"] Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.790799 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8ttw\" (UniqueName: \"kubernetes.io/projected/d1b75c6e-f666-4046-8f22-9a6fd96f9442-kube-api-access-b8ttw\") pod \"cinder-operator-controller-manager-6498cbf48f-65jcd\" (UID: \"d1b75c6e-f666-4046-8f22-9a6fd96f9442\") " pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-65jcd" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.790917 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzwvj\" (UniqueName: \"kubernetes.io/projected/c8665113-6713-4abd-8d58-66c16f2d678a-kube-api-access-pzwvj\") pod \"barbican-operator-controller-manager-75fb479bcc-pmb82\" (UID: \"c8665113-6713-4abd-8d58-66c16f2d678a\") " pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-pmb82" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.793598 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-767ccfd65f-c8rct"] Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.804541 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-56f54d6746-bxhgh"] Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.806006 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-bxhgh" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.811288 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-4x24c" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.846212 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-598f69df5d-dvhqb"] Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.848054 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-dvhqb" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.857712 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-ptk4k" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.866085 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-56f54d6746-bxhgh"] Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.878785 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-598f69df5d-dvhqb"] Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.892638 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzwvj\" (UniqueName: \"kubernetes.io/projected/c8665113-6713-4abd-8d58-66c16f2d678a-kube-api-access-pzwvj\") pod \"barbican-operator-controller-manager-75fb479bcc-pmb82\" (UID: \"c8665113-6713-4abd-8d58-66c16f2d678a\") " pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-pmb82" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.892944 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sxql\" (UniqueName: \"kubernetes.io/projected/680de92b-f127-4cb3-86c4-3e4b9ae183df-kube-api-access-5sxql\") pod \"glance-operator-controller-manager-7969689c84-lrlvb\" (UID: \"680de92b-f127-4cb3-86c4-3e4b9ae183df\") " pod="openstack-operators/glance-operator-controller-manager-7969689c84-lrlvb" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.893036 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rqzp\" (UniqueName: \"kubernetes.io/projected/8853dca4-97bf-4b91-9523-a383122bd470-kube-api-access-2rqzp\") pod \"designate-operator-controller-manager-767ccfd65f-c8rct\" (UID: \"8853dca4-97bf-4b91-9523-a383122bd470\") " pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-c8rct" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.893115 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8ttw\" (UniqueName: \"kubernetes.io/projected/d1b75c6e-f666-4046-8f22-9a6fd96f9442-kube-api-access-b8ttw\") pod \"cinder-operator-controller-manager-6498cbf48f-65jcd\" (UID: \"d1b75c6e-f666-4046-8f22-9a6fd96f9442\") " pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-65jcd" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.913151 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-99b499f4-44jql"] Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.914871 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-44jql" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.918892 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-p4m2c" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.938184 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzwvj\" (UniqueName: \"kubernetes.io/projected/c8665113-6713-4abd-8d58-66c16f2d678a-kube-api-access-pzwvj\") pod \"barbican-operator-controller-manager-75fb479bcc-pmb82\" (UID: \"c8665113-6713-4abd-8d58-66c16f2d678a\") " pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-pmb82" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.938764 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-pmb82" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.951054 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8ttw\" (UniqueName: \"kubernetes.io/projected/d1b75c6e-f666-4046-8f22-9a6fd96f9442-kube-api-access-b8ttw\") pod \"cinder-operator-controller-manager-6498cbf48f-65jcd\" (UID: \"d1b75c6e-f666-4046-8f22-9a6fd96f9442\") " pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-65jcd" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.965178 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf"] Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.968031 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.973595 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.973713 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-qdxrd" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.991711 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7454b96578-kvg22"] Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.993063 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-kvg22" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.997025 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9g5dm\" (UniqueName: \"kubernetes.io/projected/bb306581-4364-431e-866d-49a92b74eab5-kube-api-access-9g5dm\") pod \"horizon-operator-controller-manager-598f69df5d-dvhqb\" (UID: \"bb306581-4364-431e-866d-49a92b74eab5\") " pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-dvhqb" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.997097 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sxql\" (UniqueName: \"kubernetes.io/projected/680de92b-f127-4cb3-86c4-3e4b9ae183df-kube-api-access-5sxql\") pod \"glance-operator-controller-manager-7969689c84-lrlvb\" (UID: \"680de92b-f127-4cb3-86c4-3e4b9ae183df\") " pod="openstack-operators/glance-operator-controller-manager-7969689c84-lrlvb" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.997122 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rqzp\" (UniqueName: \"kubernetes.io/projected/8853dca4-97bf-4b91-9523-a383122bd470-kube-api-access-2rqzp\") pod \"designate-operator-controller-manager-767ccfd65f-c8rct\" (UID: \"8853dca4-97bf-4b91-9523-a383122bd470\") " pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-c8rct" Nov 21 15:52:30 crc kubenswrapper[4967]: I1121 15:52:30.997149 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5q5lm\" (UniqueName: \"kubernetes.io/projected/fcb67210-f9d3-483b-aa07-6f332130450c-kube-api-access-5q5lm\") pod \"heat-operator-controller-manager-56f54d6746-bxhgh\" (UID: \"fcb67210-f9d3-483b-aa07-6f332130450c\") " pod="openstack-operators/heat-operator-controller-manager-56f54d6746-bxhgh" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.001761 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-65jcd" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.010622 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-tw5fg" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.019416 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.034386 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rqzp\" (UniqueName: \"kubernetes.io/projected/8853dca4-97bf-4b91-9523-a383122bd470-kube-api-access-2rqzp\") pod \"designate-operator-controller-manager-767ccfd65f-c8rct\" (UID: \"8853dca4-97bf-4b91-9523-a383122bd470\") " pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-c8rct" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.040383 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-58f887965d-9m8rh"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.043565 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58f887965d-9m8rh" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.065868 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-99b499f4-44jql"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.066424 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-c8rct" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.068304 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-h6sfx" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.068962 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5sxql\" (UniqueName: \"kubernetes.io/projected/680de92b-f127-4cb3-86c4-3e4b9ae183df-kube-api-access-5sxql\") pod \"glance-operator-controller-manager-7969689c84-lrlvb\" (UID: \"680de92b-f127-4cb3-86c4-3e4b9ae183df\") " pod="openstack-operators/glance-operator-controller-manager-7969689c84-lrlvb" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.089508 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-7969689c84-lrlvb" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.113957 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dd74h\" (UniqueName: \"kubernetes.io/projected/ffe49522-20f7-4f17-9209-a782306baf71-kube-api-access-dd74h\") pod \"infra-operator-controller-manager-6dd8864d7c-9rqgf\" (UID: \"ffe49522-20f7-4f17-9209-a782306baf71\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.114002 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwjgq\" (UniqueName: \"kubernetes.io/projected/9782f058-db28-4c8b-b1b7-ee270c4d76b4-kube-api-access-mwjgq\") pod \"ironic-operator-controller-manager-99b499f4-44jql\" (UID: \"9782f058-db28-4c8b-b1b7-ee270c4d76b4\") " pod="openstack-operators/ironic-operator-controller-manager-99b499f4-44jql" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.114041 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ffe49522-20f7-4f17-9209-a782306baf71-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-9rqgf\" (UID: \"ffe49522-20f7-4f17-9209-a782306baf71\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.114071 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2j67\" (UniqueName: \"kubernetes.io/projected/5d919036-74e8-4637-b93e-fefc337cf51a-kube-api-access-j2j67\") pod \"keystone-operator-controller-manager-7454b96578-kvg22\" (UID: \"5d919036-74e8-4637-b93e-fefc337cf51a\") " pod="openstack-operators/keystone-operator-controller-manager-7454b96578-kvg22" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.114099 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9g5dm\" (UniqueName: \"kubernetes.io/projected/bb306581-4364-431e-866d-49a92b74eab5-kube-api-access-9g5dm\") pod 
\"horizon-operator-controller-manager-598f69df5d-dvhqb\" (UID: \"bb306581-4364-431e-866d-49a92b74eab5\") " pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-dvhqb" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.114169 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5q5lm\" (UniqueName: \"kubernetes.io/projected/fcb67210-f9d3-483b-aa07-6f332130450c-kube-api-access-5q5lm\") pod \"heat-operator-controller-manager-56f54d6746-bxhgh\" (UID: \"fcb67210-f9d3-483b-aa07-6f332130450c\") " pod="openstack-operators/heat-operator-controller-manager-56f54d6746-bxhgh" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.116949 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7454b96578-kvg22"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.126537 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58f887965d-9m8rh"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.135379 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-54b5986bb8-xjrxl"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.136779 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-xjrxl" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.146535 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-5k99x" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.163636 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9g5dm\" (UniqueName: \"kubernetes.io/projected/bb306581-4364-431e-866d-49a92b74eab5-kube-api-access-9g5dm\") pod \"horizon-operator-controller-manager-598f69df5d-dvhqb\" (UID: \"bb306581-4364-431e-866d-49a92b74eab5\") " pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-dvhqb" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.176206 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-dvhqb" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.193098 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5q5lm\" (UniqueName: \"kubernetes.io/projected/fcb67210-f9d3-483b-aa07-6f332130450c-kube-api-access-5q5lm\") pod \"heat-operator-controller-manager-56f54d6746-bxhgh\" (UID: \"fcb67210-f9d3-483b-aa07-6f332130450c\") " pod="openstack-operators/heat-operator-controller-manager-56f54d6746-bxhgh" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.197108 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-54b5986bb8-xjrxl"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.215693 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ffe49522-20f7-4f17-9209-a782306baf71-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-9rqgf\" (UID: \"ffe49522-20f7-4f17-9209-a782306baf71\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.215763 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2j67\" (UniqueName: \"kubernetes.io/projected/5d919036-74e8-4637-b93e-fefc337cf51a-kube-api-access-j2j67\") pod \"keystone-operator-controller-manager-7454b96578-kvg22\" (UID: \"5d919036-74e8-4637-b93e-fefc337cf51a\") " pod="openstack-operators/keystone-operator-controller-manager-7454b96578-kvg22" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.215803 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dvgw\" (UniqueName: \"kubernetes.io/projected/43bebf9f-0691-416d-91e2-232a3a4d37d8-kube-api-access-7dvgw\") pod \"manila-operator-controller-manager-58f887965d-9m8rh\" (UID: \"43bebf9f-0691-416d-91e2-232a3a4d37d8\") " pod="openstack-operators/manila-operator-controller-manager-58f887965d-9m8rh" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.215940 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dd74h\" (UniqueName: \"kubernetes.io/projected/ffe49522-20f7-4f17-9209-a782306baf71-kube-api-access-dd74h\") pod \"infra-operator-controller-manager-6dd8864d7c-9rqgf\" (UID: \"ffe49522-20f7-4f17-9209-a782306baf71\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.215974 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwjgq\" (UniqueName: \"kubernetes.io/projected/9782f058-db28-4c8b-b1b7-ee270c4d76b4-kube-api-access-mwjgq\") pod \"ironic-operator-controller-manager-99b499f4-44jql\" (UID: \"9782f058-db28-4c8b-b1b7-ee270c4d76b4\") " pod="openstack-operators/ironic-operator-controller-manager-99b499f4-44jql" Nov 21 15:52:31 crc kubenswrapper[4967]: E1121 15:52:31.216339 4967 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 21 15:52:31 crc kubenswrapper[4967]: E1121 15:52:31.216389 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ffe49522-20f7-4f17-9209-a782306baf71-cert podName:ffe49522-20f7-4f17-9209-a782306baf71 nodeName:}" failed. 
No retries permitted until 2025-11-21 15:52:31.71637335 +0000 UTC m=+1039.974894358 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ffe49522-20f7-4f17-9209-a782306baf71-cert") pod "infra-operator-controller-manager-6dd8864d7c-9rqgf" (UID: "ffe49522-20f7-4f17-9209-a782306baf71") : secret "infra-operator-webhook-server-cert" not found Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.252099 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78bd47f458-dcz62"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.255942 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dd74h\" (UniqueName: \"kubernetes.io/projected/ffe49522-20f7-4f17-9209-a782306baf71-kube-api-access-dd74h\") pod \"infra-operator-controller-manager-6dd8864d7c-9rqgf\" (UID: \"ffe49522-20f7-4f17-9209-a782306baf71\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.260256 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qjbwj"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.261549 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qjbwj" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.262128 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-dcz62" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.283395 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwjgq\" (UniqueName: \"kubernetes.io/projected/9782f058-db28-4c8b-b1b7-ee270c4d76b4-kube-api-access-mwjgq\") pod \"ironic-operator-controller-manager-99b499f4-44jql\" (UID: \"9782f058-db28-4c8b-b1b7-ee270c4d76b4\") " pod="openstack-operators/ironic-operator-controller-manager-99b499f4-44jql" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.284138 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-rz7mp" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.289880 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-8pvt5" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.298041 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-cfbb9c588-jt8hb"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.299478 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-jt8hb" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.317374 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qm4fc\" (UniqueName: \"kubernetes.io/projected/95150f6f-2cf8-490f-a9fe-c01038ca1807-kube-api-access-qm4fc\") pod \"mariadb-operator-controller-manager-54b5986bb8-xjrxl\" (UID: \"95150f6f-2cf8-490f-a9fe-c01038ca1807\") " pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-xjrxl" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.317459 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dvgw\" (UniqueName: \"kubernetes.io/projected/43bebf9f-0691-416d-91e2-232a3a4d37d8-kube-api-access-7dvgw\") pod \"manila-operator-controller-manager-58f887965d-9m8rh\" (UID: \"43bebf9f-0691-416d-91e2-232a3a4d37d8\") " pod="openstack-operators/manila-operator-controller-manager-58f887965d-9m8rh" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.322681 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-7jtfq" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.333158 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2j67\" (UniqueName: \"kubernetes.io/projected/5d919036-74e8-4637-b93e-fefc337cf51a-kube-api-access-j2j67\") pod \"keystone-operator-controller-manager-7454b96578-kvg22\" (UID: \"5d919036-74e8-4637-b93e-fefc337cf51a\") " pod="openstack-operators/keystone-operator-controller-manager-7454b96578-kvg22" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.339323 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qjbwj"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.381056 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dvgw\" (UniqueName: \"kubernetes.io/projected/43bebf9f-0691-416d-91e2-232a3a4d37d8-kube-api-access-7dvgw\") pod \"manila-operator-controller-manager-58f887965d-9m8rh\" (UID: \"43bebf9f-0691-416d-91e2-232a3a4d37d8\") " pod="openstack-operators/manila-operator-controller-manager-58f887965d-9m8rh" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.413247 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-44jql" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.424924 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qm4fc\" (UniqueName: \"kubernetes.io/projected/95150f6f-2cf8-490f-a9fe-c01038ca1807-kube-api-access-qm4fc\") pod \"mariadb-operator-controller-manager-54b5986bb8-xjrxl\" (UID: \"95150f6f-2cf8-490f-a9fe-c01038ca1807\") " pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-xjrxl" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.425119 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5txfv\" (UniqueName: \"kubernetes.io/projected/0b76a724-2c3b-47e1-a6bd-daada9e96cea-kube-api-access-5txfv\") pod \"neutron-operator-controller-manager-78bd47f458-dcz62\" (UID: \"0b76a724-2c3b-47e1-a6bd-daada9e96cea\") " pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-dcz62" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.425141 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n66gc\" (UniqueName: \"kubernetes.io/projected/8d84a26e-2d5b-41e3-95fa-60e2bdc67b0d-kube-api-access-n66gc\") pod \"octavia-operator-controller-manager-54cfbf4c7d-qjbwj\" (UID: \"8d84a26e-2d5b-41e3-95fa-60e2bdc67b0d\") " pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qjbwj" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.425258 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkh59\" (UniqueName: \"kubernetes.io/projected/7f1b9439-5d7e-462e-b4ce-2cfa70363101-kube-api-access-dkh59\") pod \"nova-operator-controller-manager-cfbb9c588-jt8hb\" (UID: \"7f1b9439-5d7e-462e-b4ce-2cfa70363101\") " pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-jt8hb" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.426025 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-cfbb9c588-jt8hb"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.467235 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-bxhgh" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.471976 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-kvg22" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.472721 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58f887965d-9m8rh" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.500287 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qm4fc\" (UniqueName: \"kubernetes.io/projected/95150f6f-2cf8-490f-a9fe-c01038ca1807-kube-api-access-qm4fc\") pod \"mariadb-operator-controller-manager-54b5986bb8-xjrxl\" (UID: \"95150f6f-2cf8-490f-a9fe-c01038ca1807\") " pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-xjrxl" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.517190 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-xjrxl" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.529564 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5txfv\" (UniqueName: \"kubernetes.io/projected/0b76a724-2c3b-47e1-a6bd-daada9e96cea-kube-api-access-5txfv\") pod \"neutron-operator-controller-manager-78bd47f458-dcz62\" (UID: \"0b76a724-2c3b-47e1-a6bd-daada9e96cea\") " pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-dcz62" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.529625 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n66gc\" (UniqueName: \"kubernetes.io/projected/8d84a26e-2d5b-41e3-95fa-60e2bdc67b0d-kube-api-access-n66gc\") pod \"octavia-operator-controller-manager-54cfbf4c7d-qjbwj\" (UID: \"8d84a26e-2d5b-41e3-95fa-60e2bdc67b0d\") " pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qjbwj" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.529876 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkh59\" (UniqueName: \"kubernetes.io/projected/7f1b9439-5d7e-462e-b4ce-2cfa70363101-kube-api-access-dkh59\") pod \"nova-operator-controller-manager-cfbb9c588-jt8hb\" (UID: \"7f1b9439-5d7e-462e-b4ce-2cfa70363101\") " pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-jt8hb" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.533996 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78bd47f458-dcz62"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.561982 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n66gc\" (UniqueName: \"kubernetes.io/projected/8d84a26e-2d5b-41e3-95fa-60e2bdc67b0d-kube-api-access-n66gc\") pod \"octavia-operator-controller-manager-54cfbf4c7d-qjbwj\" (UID: \"8d84a26e-2d5b-41e3-95fa-60e2bdc67b0d\") " pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qjbwj" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.581752 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5txfv\" (UniqueName: \"kubernetes.io/projected/0b76a724-2c3b-47e1-a6bd-daada9e96cea-kube-api-access-5txfv\") pod \"neutron-operator-controller-manager-78bd47f458-dcz62\" (UID: \"0b76a724-2c3b-47e1-a6bd-daada9e96cea\") " pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-dcz62" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.582784 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkh59\" (UniqueName: \"kubernetes.io/projected/7f1b9439-5d7e-462e-b4ce-2cfa70363101-kube-api-access-dkh59\") pod \"nova-operator-controller-manager-cfbb9c588-jt8hb\" (UID: \"7f1b9439-5d7e-462e-b4ce-2cfa70363101\") " pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-jt8hb" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.589383 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.592593 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.600919 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.601048 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-cf2k2" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.644446 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qjbwj" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.645279 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-54fc5f65b7-jsj2l"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.657096 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-54fc5f65b7-jsj2l"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.657230 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-jsj2l" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.658801 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.662909 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-zrvdw" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.673132 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-d656998f4-f78mk"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.675101 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d656998f4-f78mk" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.677622 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-b7bvd" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.698751 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-dcz62" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.734464 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d656998f4-f78mk"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.739432 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vd2tr\" (UniqueName: \"kubernetes.io/projected/dce06b30-88dd-4beb-b4cb-7982ed0a8000-kube-api-access-vd2tr\") pod \"ovn-operator-controller-manager-54fc5f65b7-jsj2l\" (UID: \"dce06b30-88dd-4beb-b4cb-7982ed0a8000\") " pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-jsj2l" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.739620 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ffe49522-20f7-4f17-9209-a782306baf71-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-9rqgf\" (UID: \"ffe49522-20f7-4f17-9209-a782306baf71\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.739666 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/26d40ea4-3e61-4911-b5d4-a87a06b6698e-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-xq76g\" (UID: \"26d40ea4-3e61-4911-b5d4-a87a06b6698e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.739708 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wrdt\" (UniqueName: \"kubernetes.io/projected/26d40ea4-3e61-4911-b5d4-a87a06b6698e-kube-api-access-8wrdt\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-xq76g\" (UID: \"26d40ea4-3e61-4911-b5d4-a87a06b6698e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g" Nov 21 15:52:31 crc kubenswrapper[4967]: E1121 15:52:31.739873 4967 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 21 15:52:31 crc kubenswrapper[4967]: E1121 15:52:31.739921 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ffe49522-20f7-4f17-9209-a782306baf71-cert podName:ffe49522-20f7-4f17-9209-a782306baf71 nodeName:}" failed. No retries permitted until 2025-11-21 15:52:32.739905434 +0000 UTC m=+1040.998426442 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ffe49522-20f7-4f17-9209-a782306baf71-cert") pod "infra-operator-controller-manager-6dd8864d7c-9rqgf" (UID: "ffe49522-20f7-4f17-9209-a782306baf71") : secret "infra-operator-webhook-server-cert" not found Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.749671 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b797b8dff-b8f9d"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.751057 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-b8f9d" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.756100 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-84zfd" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.769428 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-jt8hb" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.778882 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-654d9964b7-j7n2g"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.780507 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-654d9964b7-j7n2g" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.785064 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-mnb2r" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.824575 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-b4c496f69-wr7n5"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.830986 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-b4c496f69-wr7n5" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.837025 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-rkkj4" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.841639 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b797b8dff-b8f9d"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.843569 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vd2tr\" (UniqueName: \"kubernetes.io/projected/dce06b30-88dd-4beb-b4cb-7982ed0a8000-kube-api-access-vd2tr\") pod \"ovn-operator-controller-manager-54fc5f65b7-jsj2l\" (UID: \"dce06b30-88dd-4beb-b4cb-7982ed0a8000\") " pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-jsj2l" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.843689 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ghth\" (UniqueName: \"kubernetes.io/projected/e8ef978f-0513-4008-a8f5-07c52a1979bb-kube-api-access-7ghth\") pod \"placement-operator-controller-manager-5b797b8dff-b8f9d\" (UID: \"e8ef978f-0513-4008-a8f5-07c52a1979bb\") " pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-b8f9d" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.843817 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/26d40ea4-3e61-4911-b5d4-a87a06b6698e-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-xq76g\" (UID: \"26d40ea4-3e61-4911-b5d4-a87a06b6698e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.843913 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wrdt\" (UniqueName: 
\"kubernetes.io/projected/26d40ea4-3e61-4911-b5d4-a87a06b6698e-kube-api-access-8wrdt\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-xq76g\" (UID: \"26d40ea4-3e61-4911-b5d4-a87a06b6698e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.843973 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cff9g\" (UniqueName: \"kubernetes.io/projected/51e2d793-61c7-4587-ac51-fb644591ef74-kube-api-access-cff9g\") pod \"swift-operator-controller-manager-d656998f4-f78mk\" (UID: \"51e2d793-61c7-4587-ac51-fb644591ef74\") " pod="openstack-operators/swift-operator-controller-manager-d656998f4-f78mk" Nov 21 15:52:31 crc kubenswrapper[4967]: E1121 15:52:31.844152 4967 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 21 15:52:31 crc kubenswrapper[4967]: E1121 15:52:31.844287 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/26d40ea4-3e61-4911-b5d4-a87a06b6698e-cert podName:26d40ea4-3e61-4911-b5d4-a87a06b6698e nodeName:}" failed. No retries permitted until 2025-11-21 15:52:32.344239078 +0000 UTC m=+1040.602760086 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/26d40ea4-3e61-4911-b5d4-a87a06b6698e-cert") pod "openstack-baremetal-operator-controller-manager-8c7444f48-xq76g" (UID: "26d40ea4-3e61-4911-b5d4-a87a06b6698e") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.867990 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wrdt\" (UniqueName: \"kubernetes.io/projected/26d40ea4-3e61-4911-b5d4-a87a06b6698e-kube-api-access-8wrdt\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-xq76g\" (UID: \"26d40ea4-3e61-4911-b5d4-a87a06b6698e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.877265 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vd2tr\" (UniqueName: \"kubernetes.io/projected/dce06b30-88dd-4beb-b4cb-7982ed0a8000-kube-api-access-vd2tr\") pod \"ovn-operator-controller-manager-54fc5f65b7-jsj2l\" (UID: \"dce06b30-88dd-4beb-b4cb-7982ed0a8000\") " pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-jsj2l" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.915024 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-654d9964b7-j7n2g"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.930176 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-b4c496f69-wr7n5"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.945624 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ghth\" (UniqueName: \"kubernetes.io/projected/e8ef978f-0513-4008-a8f5-07c52a1979bb-kube-api-access-7ghth\") pod \"placement-operator-controller-manager-5b797b8dff-b8f9d\" (UID: \"e8ef978f-0513-4008-a8f5-07c52a1979bb\") " pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-b8f9d" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.946065 4967 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5j7t2\" (UniqueName: \"kubernetes.io/projected/fba0599a-65d9-4254-b118-6527649ffb1e-kube-api-access-5j7t2\") pod \"telemetry-operator-controller-manager-654d9964b7-j7n2g\" (UID: \"fba0599a-65d9-4254-b118-6527649ffb1e\") " pod="openstack-operators/telemetry-operator-controller-manager-654d9964b7-j7n2g" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.946096 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbvhf\" (UniqueName: \"kubernetes.io/projected/cc2f0cb7-e6eb-41ab-a809-ff5bf52668a1-kube-api-access-bbvhf\") pod \"test-operator-controller-manager-b4c496f69-wr7n5\" (UID: \"cc2f0cb7-e6eb-41ab-a809-ff5bf52668a1\") " pod="openstack-operators/test-operator-controller-manager-b4c496f69-wr7n5" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.946122 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cff9g\" (UniqueName: \"kubernetes.io/projected/51e2d793-61c7-4587-ac51-fb644591ef74-kube-api-access-cff9g\") pod \"swift-operator-controller-manager-d656998f4-f78mk\" (UID: \"51e2d793-61c7-4587-ac51-fb644591ef74\") " pod="openstack-operators/swift-operator-controller-manager-d656998f4-f78mk" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.949986 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-8c6448b9f-qh6wx"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.951783 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-qh6wx" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.956106 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-74rk9" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.965229 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-8c6448b9f-qh6wx"] Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.973567 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ghth\" (UniqueName: \"kubernetes.io/projected/e8ef978f-0513-4008-a8f5-07c52a1979bb-kube-api-access-7ghth\") pod \"placement-operator-controller-manager-5b797b8dff-b8f9d\" (UID: \"e8ef978f-0513-4008-a8f5-07c52a1979bb\") " pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-b8f9d" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.977247 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cff9g\" (UniqueName: \"kubernetes.io/projected/51e2d793-61c7-4587-ac51-fb644591ef74-kube-api-access-cff9g\") pod \"swift-operator-controller-manager-d656998f4-f78mk\" (UID: \"51e2d793-61c7-4587-ac51-fb644591ef74\") " pod="openstack-operators/swift-operator-controller-manager-d656998f4-f78mk" Nov 21 15:52:31 crc kubenswrapper[4967]: I1121 15:52:31.996597 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-jsj2l" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.029810 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-648ff6d765-v2pj5"] Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.033687 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-648ff6d765-v2pj5" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.036878 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-fh6g8" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.037722 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.038915 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d656998f4-f78mk" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.044108 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-648ff6d765-v2pj5"] Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.050353 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xd4nt\" (UniqueName: \"kubernetes.io/projected/39a393cc-d7e3-4d00-89a2-7fbfaea1e6d4-kube-api-access-xd4nt\") pod \"watcher-operator-controller-manager-8c6448b9f-qh6wx\" (UID: \"39a393cc-d7e3-4d00-89a2-7fbfaea1e6d4\") " pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-qh6wx" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.050556 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5j7t2\" (UniqueName: \"kubernetes.io/projected/fba0599a-65d9-4254-b118-6527649ffb1e-kube-api-access-5j7t2\") pod \"telemetry-operator-controller-manager-654d9964b7-j7n2g\" (UID: \"fba0599a-65d9-4254-b118-6527649ffb1e\") " pod="openstack-operators/telemetry-operator-controller-manager-654d9964b7-j7n2g" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.050590 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbvhf\" (UniqueName: \"kubernetes.io/projected/cc2f0cb7-e6eb-41ab-a809-ff5bf52668a1-kube-api-access-bbvhf\") pod \"test-operator-controller-manager-b4c496f69-wr7n5\" (UID: \"cc2f0cb7-e6eb-41ab-a809-ff5bf52668a1\") " pod="openstack-operators/test-operator-controller-manager-b4c496f69-wr7n5" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.069475 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-mm6fj"] Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.070854 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-mm6fj" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.075445 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-xzpkz" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.076387 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5j7t2\" (UniqueName: \"kubernetes.io/projected/fba0599a-65d9-4254-b118-6527649ffb1e-kube-api-access-5j7t2\") pod \"telemetry-operator-controller-manager-654d9964b7-j7n2g\" (UID: \"fba0599a-65d9-4254-b118-6527649ffb1e\") " pod="openstack-operators/telemetry-operator-controller-manager-654d9964b7-j7n2g" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.090917 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-mm6fj"] Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.091726 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbvhf\" (UniqueName: \"kubernetes.io/projected/cc2f0cb7-e6eb-41ab-a809-ff5bf52668a1-kube-api-access-bbvhf\") pod \"test-operator-controller-manager-b4c496f69-wr7n5\" (UID: \"cc2f0cb7-e6eb-41ab-a809-ff5bf52668a1\") " pod="openstack-operators/test-operator-controller-manager-b4c496f69-wr7n5" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.100021 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-b8f9d" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.157530 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s69xl\" (UniqueName: \"kubernetes.io/projected/cb33f2a5-e4b0-4ebf-9ddb-03979139e785-kube-api-access-s69xl\") pod \"openstack-operator-controller-manager-648ff6d765-v2pj5\" (UID: \"cb33f2a5-e4b0-4ebf-9ddb-03979139e785\") " pod="openstack-operators/openstack-operator-controller-manager-648ff6d765-v2pj5" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.157602 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xd4nt\" (UniqueName: \"kubernetes.io/projected/39a393cc-d7e3-4d00-89a2-7fbfaea1e6d4-kube-api-access-xd4nt\") pod \"watcher-operator-controller-manager-8c6448b9f-qh6wx\" (UID: \"39a393cc-d7e3-4d00-89a2-7fbfaea1e6d4\") " pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-qh6wx" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.157630 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cb33f2a5-e4b0-4ebf-9ddb-03979139e785-cert\") pod \"openstack-operator-controller-manager-648ff6d765-v2pj5\" (UID: \"cb33f2a5-e4b0-4ebf-9ddb-03979139e785\") " pod="openstack-operators/openstack-operator-controller-manager-648ff6d765-v2pj5" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.157722 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sb4sn\" (UniqueName: \"kubernetes.io/projected/e107a04d-5715-481d-94d7-b99ad7f3e95d-kube-api-access-sb4sn\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-mm6fj\" (UID: \"e107a04d-5715-481d-94d7-b99ad7f3e95d\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-mm6fj" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 
15:52:32.161122 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-654d9964b7-j7n2g" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.178990 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-b4c496f69-wr7n5" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.182285 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xd4nt\" (UniqueName: \"kubernetes.io/projected/39a393cc-d7e3-4d00-89a2-7fbfaea1e6d4-kube-api-access-xd4nt\") pod \"watcher-operator-controller-manager-8c6448b9f-qh6wx\" (UID: \"39a393cc-d7e3-4d00-89a2-7fbfaea1e6d4\") " pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-qh6wx" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.221144 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-75fb479bcc-pmb82"] Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.234073 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-qh6wx" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.287238 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cb33f2a5-e4b0-4ebf-9ddb-03979139e785-cert\") pod \"openstack-operator-controller-manager-648ff6d765-v2pj5\" (UID: \"cb33f2a5-e4b0-4ebf-9ddb-03979139e785\") " pod="openstack-operators/openstack-operator-controller-manager-648ff6d765-v2pj5" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.287737 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sb4sn\" (UniqueName: \"kubernetes.io/projected/e107a04d-5715-481d-94d7-b99ad7f3e95d-kube-api-access-sb4sn\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-mm6fj\" (UID: \"e107a04d-5715-481d-94d7-b99ad7f3e95d\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-mm6fj" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.288033 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s69xl\" (UniqueName: \"kubernetes.io/projected/cb33f2a5-e4b0-4ebf-9ddb-03979139e785-kube-api-access-s69xl\") pod \"openstack-operator-controller-manager-648ff6d765-v2pj5\" (UID: \"cb33f2a5-e4b0-4ebf-9ddb-03979139e785\") " pod="openstack-operators/openstack-operator-controller-manager-648ff6d765-v2pj5" Nov 21 15:52:32 crc kubenswrapper[4967]: E1121 15:52:32.289596 4967 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 21 15:52:32 crc kubenswrapper[4967]: E1121 15:52:32.289680 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/cb33f2a5-e4b0-4ebf-9ddb-03979139e785-cert podName:cb33f2a5-e4b0-4ebf-9ddb-03979139e785 nodeName:}" failed. No retries permitted until 2025-11-21 15:52:32.78964997 +0000 UTC m=+1041.048170978 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/cb33f2a5-e4b0-4ebf-9ddb-03979139e785-cert") pod "openstack-operator-controller-manager-648ff6d765-v2pj5" (UID: "cb33f2a5-e4b0-4ebf-9ddb-03979139e785") : secret "webhook-server-cert" not found Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.312914 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s69xl\" (UniqueName: \"kubernetes.io/projected/cb33f2a5-e4b0-4ebf-9ddb-03979139e785-kube-api-access-s69xl\") pod \"openstack-operator-controller-manager-648ff6d765-v2pj5\" (UID: \"cb33f2a5-e4b0-4ebf-9ddb-03979139e785\") " pod="openstack-operators/openstack-operator-controller-manager-648ff6d765-v2pj5" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.322812 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sb4sn\" (UniqueName: \"kubernetes.io/projected/e107a04d-5715-481d-94d7-b99ad7f3e95d-kube-api-access-sb4sn\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-mm6fj\" (UID: \"e107a04d-5715-481d-94d7-b99ad7f3e95d\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-mm6fj" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.364626 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-7969689c84-lrlvb"] Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.390826 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/26d40ea4-3e61-4911-b5d4-a87a06b6698e-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-xq76g\" (UID: \"26d40ea4-3e61-4911-b5d4-a87a06b6698e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.401827 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/26d40ea4-3e61-4911-b5d4-a87a06b6698e-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-xq76g\" (UID: \"26d40ea4-3e61-4911-b5d4-a87a06b6698e\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.536384 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.575513 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-mm6fj" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.796804 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ffe49522-20f7-4f17-9209-a782306baf71-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-9rqgf\" (UID: \"ffe49522-20f7-4f17-9209-a782306baf71\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.796866 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cb33f2a5-e4b0-4ebf-9ddb-03979139e785-cert\") pod \"openstack-operator-controller-manager-648ff6d765-v2pj5\" (UID: \"cb33f2a5-e4b0-4ebf-9ddb-03979139e785\") " pod="openstack-operators/openstack-operator-controller-manager-648ff6d765-v2pj5" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.800995 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ffe49522-20f7-4f17-9209-a782306baf71-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-9rqgf\" (UID: \"ffe49522-20f7-4f17-9209-a782306baf71\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.801350 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cb33f2a5-e4b0-4ebf-9ddb-03979139e785-cert\") pod \"openstack-operator-controller-manager-648ff6d765-v2pj5\" (UID: \"cb33f2a5-e4b0-4ebf-9ddb-03979139e785\") " pod="openstack-operators/openstack-operator-controller-manager-648ff6d765-v2pj5" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.856102 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-648ff6d765-v2pj5" Nov 21 15:52:32 crc kubenswrapper[4967]: I1121 15:52:32.923856 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf" Nov 21 15:52:33 crc kubenswrapper[4967]: I1121 15:52:33.002950 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7969689c84-lrlvb" event={"ID":"680de92b-f127-4cb3-86c4-3e4b9ae183df","Type":"ContainerStarted","Data":"cc71a7d2a37b249750a4bbebe64554dea09b0a71d3c7149abce5a9254c177ce3"} Nov 21 15:52:33 crc kubenswrapper[4967]: I1121 15:52:33.004120 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-pmb82" event={"ID":"c8665113-6713-4abd-8d58-66c16f2d678a","Type":"ContainerStarted","Data":"b9c2bc94989662752aa3573d54f40cb2d8999bee3f6c6a2d1a7e66a405528d3c"} Nov 21 15:52:33 crc kubenswrapper[4967]: I1121 15:52:33.102744 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58f887965d-9m8rh"] Nov 21 15:52:33 crc kubenswrapper[4967]: I1121 15:52:33.302061 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7454b96578-kvg22"] Nov 21 15:52:33 crc kubenswrapper[4967]: W1121 15:52:33.322848 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b75c6e_f666_4046_8f22_9a6fd96f9442.slice/crio-cb84b992ddfbf21711dbc89aeffbf7822c7b337774dc96557833086a5fa176bc WatchSource:0}: Error finding container cb84b992ddfbf21711dbc89aeffbf7822c7b337774dc96557833086a5fa176bc: Status 404 returned error can't find the container with id cb84b992ddfbf21711dbc89aeffbf7822c7b337774dc96557833086a5fa176bc Nov 21 15:52:33 crc kubenswrapper[4967]: I1121 15:52:33.334958 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-56f54d6746-bxhgh"] Nov 21 15:52:33 crc kubenswrapper[4967]: I1121 15:52:33.348478 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-54b5986bb8-xjrxl"] Nov 21 15:52:33 crc kubenswrapper[4967]: I1121 15:52:33.376952 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-598f69df5d-dvhqb"] Nov 21 15:52:33 crc kubenswrapper[4967]: I1121 15:52:33.392995 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6498cbf48f-65jcd"] Nov 21 15:52:33 crc kubenswrapper[4967]: I1121 15:52:33.402384 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-99b499f4-44jql"] Nov 21 15:52:33 crc kubenswrapper[4967]: I1121 15:52:33.411488 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qjbwj"] Nov 21 15:52:33 crc kubenswrapper[4967]: I1121 15:52:33.420561 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-767ccfd65f-c8rct"] Nov 21 15:52:33 crc kubenswrapper[4967]: W1121 15:52:33.433425 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9782f058_db28_4c8b_b1b7_ee270c4d76b4.slice/crio-f34472f693bb32ac771c7d9697564ca8f8be44ea0a24c9fde8109440f81c3f13 WatchSource:0}: Error finding container f34472f693bb32ac771c7d9697564ca8f8be44ea0a24c9fde8109440f81c3f13: Status 404 returned 
error can't find the container with id f34472f693bb32ac771c7d9697564ca8f8be44ea0a24c9fde8109440f81c3f13 Nov 21 15:52:33 crc kubenswrapper[4967]: W1121 15:52:33.434001 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod95150f6f_2cf8_490f_a9fe_c01038ca1807.slice/crio-7c7c41ab7a25db2c2266dbbb840a01d30aa30856bffb86470bf097bceaae3594 WatchSource:0}: Error finding container 7c7c41ab7a25db2c2266dbbb840a01d30aa30856bffb86470bf097bceaae3594: Status 404 returned error can't find the container with id 7c7c41ab7a25db2c2266dbbb840a01d30aa30856bffb86470bf097bceaae3594 Nov 21 15:52:33 crc kubenswrapper[4967]: I1121 15:52:33.499262 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b797b8dff-b8f9d"] Nov 21 15:52:33 crc kubenswrapper[4967]: W1121 15:52:33.499492 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode8ef978f_0513_4008_a8f5_07c52a1979bb.slice/crio-a056a9e59f4123ada750175457d56b208c2cfa54592a856180c468813993263f WatchSource:0}: Error finding container a056a9e59f4123ada750175457d56b208c2cfa54592a856180c468813993263f: Status 404 returned error can't find the container with id a056a9e59f4123ada750175457d56b208c2cfa54592a856180c468813993263f Nov 21 15:52:33 crc kubenswrapper[4967]: W1121 15:52:33.502097 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddce06b30_88dd_4beb_b4cb_7982ed0a8000.slice/crio-51d411b9251149b60c505790bf5ddc89c8f73d8d851041f001ced259f317a22c WatchSource:0}: Error finding container 51d411b9251149b60c505790bf5ddc89c8f73d8d851041f001ced259f317a22c: Status 404 returned error can't find the container with id 51d411b9251149b60c505790bf5ddc89c8f73d8d851041f001ced259f317a22c Nov 21 15:52:33 crc kubenswrapper[4967]: I1121 15:52:33.507512 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-54fc5f65b7-jsj2l"] Nov 21 15:52:33 crc kubenswrapper[4967]: E1121 15:52:33.509470 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vd2tr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-54fc5f65b7-jsj2l_openstack-operators(dce06b30-88dd-4beb-b4cb-7982ed0a8000): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 15:52:33 crc kubenswrapper[4967]: I1121 15:52:33.525925 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-cfbb9c588-jt8hb"] Nov 21 15:52:33 crc kubenswrapper[4967]: I1121 15:52:33.586436 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78bd47f458-dcz62"] Nov 21 15:52:33 crc kubenswrapper[4967]: W1121 15:52:33.591794 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b76a724_2c3b_47e1_a6bd_daada9e96cea.slice/crio-8c8d12b1a8db687ff7c555bb3ea578218994721e785d46c1b8b3fa7cdcd1158b WatchSource:0}: Error finding container 8c8d12b1a8db687ff7c555bb3ea578218994721e785d46c1b8b3fa7cdcd1158b: Status 404 returned error can't find the container with id 8c8d12b1a8db687ff7c555bb3ea578218994721e785d46c1b8b3fa7cdcd1158b Nov 21 15:52:33 crc kubenswrapper[4967]: E1121 15:52:33.683634 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-jsj2l" podUID="dce06b30-88dd-4beb-b4cb-7982ed0a8000" Nov 21 15:52:33 crc kubenswrapper[4967]: I1121 15:52:33.960384 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-mm6fj"] Nov 21 15:52:33 crc kubenswrapper[4967]: W1121 15:52:33.982092 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode107a04d_5715_481d_94d7_b99ad7f3e95d.slice/crio-8a1ef194edd971fbe28c08fbfc0c6748d04b5dcd081ada651ca92bf73185e061 WatchSource:0}: Error finding container 8a1ef194edd971fbe28c08fbfc0c6748d04b5dcd081ada651ca92bf73185e061: Status 404 returned error can't find the container with id 8a1ef194edd971fbe28c08fbfc0c6748d04b5dcd081ada651ca92bf73185e061 Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.007528 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d656998f4-f78mk"] Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.032638 4967 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-8c6448b9f-qh6wx"] Nov 21 15:52:34 crc kubenswrapper[4967]: W1121 15:52:34.035058 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod51e2d793_61c7_4587_ac51_fb644591ef74.slice/crio-c779cf2de5a975602438f36ed3b6d0dc71493d09bcadf559588c6fdbf642692b WatchSource:0}: Error finding container c779cf2de5a975602438f36ed3b6d0dc71493d09bcadf559588c6fdbf642692b: Status 404 returned error can't find the container with id c779cf2de5a975602438f36ed3b6d0dc71493d09bcadf559588c6fdbf642692b Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.037851 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g"] Nov 21 15:52:34 crc kubenswrapper[4967]: W1121 15:52:34.042990 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod26d40ea4_3e61_4911_b5d4_a87a06b6698e.slice/crio-9cc2a02090a6a9515564ecb4cf5856179ed273ad429860e6e136a7f2abf4eee3 WatchSource:0}: Error finding container 9cc2a02090a6a9515564ecb4cf5856179ed273ad429860e6e136a7f2abf4eee3: Status 404 returned error can't find the container with id 9cc2a02090a6a9515564ecb4cf5856179ed273ad429860e6e136a7f2abf4eee3 Nov 21 15:52:34 crc kubenswrapper[4967]: E1121 15:52:34.043883 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xd4nt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-8c6448b9f-qh6wx_openstack-operators(39a393cc-d7e3-4d00-89a2-7fbfaea1e6d4): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.048083 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-b4c496f69-wr7n5"] Nov 21 15:52:34 crc kubenswrapper[4967]: E1121 15:52:34.051430 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_U
RL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHC
P_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:q
uay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,
Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8wrdt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
openstack-baremetal-operator-controller-manager-8c7444f48-xq76g_openstack-operators(26d40ea4-3e61-4911-b5d4-a87a06b6698e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.053726 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-44jql" event={"ID":"9782f058-db28-4c8b-b1b7-ee270c4d76b4","Type":"ContainerStarted","Data":"f34472f693bb32ac771c7d9697564ca8f8be44ea0a24c9fde8109440f81c3f13"} Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.057789 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-654d9964b7-j7n2g"] Nov 21 15:52:34 crc kubenswrapper[4967]: W1121 15:52:34.058333 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfba0599a_65d9_4254_b118_6527649ffb1e.slice/crio-bd42920caf1dc0fdb99f6bbab51adc5bea7d7685a171f6c9a23d32e2b8174673 WatchSource:0}: Error finding container bd42920caf1dc0fdb99f6bbab51adc5bea7d7685a171f6c9a23d32e2b8174673: Status 404 returned error can't find the container with id bd42920caf1dc0fdb99f6bbab51adc5bea7d7685a171f6c9a23d32e2b8174673 Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.059413 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-xjrxl" event={"ID":"95150f6f-2cf8-490f-a9fe-c01038ca1807","Type":"ContainerStarted","Data":"7c7c41ab7a25db2c2266dbbb840a01d30aa30856bffb86470bf097bceaae3594"} Nov 21 15:52:34 crc kubenswrapper[4967]: E1121 15:52:34.065162 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.196:5001/openstack-k8s-operators/telemetry-operator:57284e36a25ee63dd9ec7bcaee4ba1f50bec5c34,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5j7t2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-654d9964b7-j7n2g_openstack-operators(fba0599a-65d9-4254-b118-6527649ffb1e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.066465 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-b8f9d" event={"ID":"e8ef978f-0513-4008-a8f5-07c52a1979bb","Type":"ContainerStarted","Data":"a056a9e59f4123ada750175457d56b208c2cfa54592a856180c468813993263f"} Nov 21 15:52:34 crc kubenswrapper[4967]: E1121 15:52:34.073124 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bbvhf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-b4c496f69-wr7n5_openstack-operators(cc2f0cb7-e6eb-41ab-a809-ff5bf52668a1): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.083451 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-jsj2l" event={"ID":"dce06b30-88dd-4beb-b4cb-7982ed0a8000","Type":"ContainerStarted","Data":"788febf38a8d2978bfa711ecdee284288a602c37e96d48679ac7433b8f2d811f"} Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.083492 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-jsj2l" event={"ID":"dce06b30-88dd-4beb-b4cb-7982ed0a8000","Type":"ContainerStarted","Data":"51d411b9251149b60c505790bf5ddc89c8f73d8d851041f001ced259f317a22c"} Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.115865 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf"] Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.121849 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qjbwj" event={"ID":"8d84a26e-2d5b-41e3-95fa-60e2bdc67b0d","Type":"ContainerStarted","Data":"941c2787ece5d1ebe6f399b476a8bc112d5c2112c94a9a8bdde642f7d6ff5fb2"} Nov 21 15:52:34 crc kubenswrapper[4967]: E1121 15:52:34.123775 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-jsj2l" podUID="dce06b30-88dd-4beb-b4cb-7982ed0a8000" Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.124726 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-mm6fj" event={"ID":"e107a04d-5715-481d-94d7-b99ad7f3e95d","Type":"ContainerStarted","Data":"8a1ef194edd971fbe28c08fbfc0c6748d04b5dcd081ada651ca92bf73185e061"} Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.129297 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-bxhgh" event={"ID":"fcb67210-f9d3-483b-aa07-6f332130450c","Type":"ContainerStarted","Data":"3767244af5e47c2cb36088354320c37ddb0f49118eaa2decf58f2c4957531db4"} Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.144291 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-c8rct" event={"ID":"8853dca4-97bf-4b91-9523-a383122bd470","Type":"ContainerStarted","Data":"462e190a1533a0acd58bbeaca36d97516020cb0fcde596b2bd2493666377c49a"} Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.150332 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-jt8hb" event={"ID":"7f1b9439-5d7e-462e-b4ce-2cfa70363101","Type":"ContainerStarted","Data":"5c00d0e323918fc9cc1fb78f75c2b2deac8d2446e1ee14893d9885e5426d3ce8"} Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.153009 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58f887965d-9m8rh" event={"ID":"43bebf9f-0691-416d-91e2-232a3a4d37d8","Type":"ContainerStarted","Data":"f5b678e96db4f8ba6e1602a47ffb6900474b1142f63089dd419f1f88e59ce137"} Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.154538 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-kvg22" event={"ID":"5d919036-74e8-4637-b93e-fefc337cf51a","Type":"ContainerStarted","Data":"2fe0b924a1f5f2956d6fddd10674ee338def87fbac10d1a1258ee907ebc27eac"} Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.155949 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-65jcd" event={"ID":"d1b75c6e-f666-4046-8f22-9a6fd96f9442","Type":"ContainerStarted","Data":"cb84b992ddfbf21711dbc89aeffbf7822c7b337774dc96557833086a5fa176bc"} Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.157121 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-dvhqb" event={"ID":"bb306581-4364-431e-866d-49a92b74eab5","Type":"ContainerStarted","Data":"05a9f4167672e247e7b1e317139b817f13f6fc01cc5a662f86bd5b8a7d6859ad"} Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.161893 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-dcz62" event={"ID":"0b76a724-2c3b-47e1-a6bd-daada9e96cea","Type":"ContainerStarted","Data":"8c8d12b1a8db687ff7c555bb3ea578218994721e785d46c1b8b3fa7cdcd1158b"} Nov 21 15:52:34 crc kubenswrapper[4967]: I1121 15:52:34.162512 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-648ff6d765-v2pj5"] Nov 21 15:52:34 crc kubenswrapper[4967]: W1121 15:52:34.185590 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podffe49522_20f7_4f17_9209_a782306baf71.slice/crio-6f74fe3face2d70aa6be3d13dd089c39176119aedb1c8774097b1b07c511b2a3 WatchSource:0}: Error finding container 6f74fe3face2d70aa6be3d13dd089c39176119aedb1c8774097b1b07c511b2a3: Status 404 returned error can't find the container with id 6f74fe3face2d70aa6be3d13dd089c39176119aedb1c8774097b1b07c511b2a3 Nov 21 15:52:34 crc kubenswrapper[4967]: E1121 15:52:34.329861 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-b4c496f69-wr7n5" podUID="cc2f0cb7-e6eb-41ab-a809-ff5bf52668a1" Nov 21 15:52:34 crc kubenswrapper[4967]: E1121 15:52:34.406819 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-qh6wx" podUID="39a393cc-d7e3-4d00-89a2-7fbfaea1e6d4" Nov 21 15:52:35 crc kubenswrapper[4967]: I1121 15:52:35.187089 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-b4c496f69-wr7n5" event={"ID":"cc2f0cb7-e6eb-41ab-a809-ff5bf52668a1","Type":"ContainerStarted","Data":"06824ee9f1f2739d9061d62b19df944fceab9c6cff7a0e3d05cc6aada257db16"} Nov 21 15:52:35 crc kubenswrapper[4967]: I1121 15:52:35.187144 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-b4c496f69-wr7n5" event={"ID":"cc2f0cb7-e6eb-41ab-a809-ff5bf52668a1","Type":"ContainerStarted","Data":"6a40871b10872e5be746d802b1c3f3036c1e9c9839642da8f0422155706c45ae"} Nov 21 15:52:35 crc kubenswrapper[4967]: E1121 15:52:35.188988 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d\\\"\"" pod="openstack-operators/test-operator-controller-manager-b4c496f69-wr7n5" podUID="cc2f0cb7-e6eb-41ab-a809-ff5bf52668a1" Nov 21 15:52:35 crc kubenswrapper[4967]: I1121 15:52:35.189097 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d656998f4-f78mk" event={"ID":"51e2d793-61c7-4587-ac51-fb644591ef74","Type":"ContainerStarted","Data":"c779cf2de5a975602438f36ed3b6d0dc71493d09bcadf559588c6fdbf642692b"} Nov 21 15:52:35 crc kubenswrapper[4967]: I1121 15:52:35.190826 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-648ff6d765-v2pj5" event={"ID":"cb33f2a5-e4b0-4ebf-9ddb-03979139e785","Type":"ContainerStarted","Data":"33de66fa032675a8f7d0b3d2f7ec495771e05083612707a450056002d426234b"} Nov 21 15:52:35 crc kubenswrapper[4967]: I1121 15:52:35.194190 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-qh6wx" event={"ID":"39a393cc-d7e3-4d00-89a2-7fbfaea1e6d4","Type":"ContainerStarted","Data":"f0fb8f0f028a104738cb1e066c25b980274b9af0d93325ff239fc4e232aefdc5"} Nov 21 15:52:35 crc kubenswrapper[4967]: I1121 15:52:35.194221 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-qh6wx" event={"ID":"39a393cc-d7e3-4d00-89a2-7fbfaea1e6d4","Type":"ContainerStarted","Data":"ecb8615f9ee1042bb5e4323cfdddc0ea4b029d21ac0b9c753773f55c57649e8e"} Nov 21 15:52:35 crc kubenswrapper[4967]: E1121 15:52:35.195551 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-qh6wx" podUID="39a393cc-d7e3-4d00-89a2-7fbfaea1e6d4" Nov 21 15:52:35 crc kubenswrapper[4967]: I1121 15:52:35.195959 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-654d9964b7-j7n2g" event={"ID":"fba0599a-65d9-4254-b118-6527649ffb1e","Type":"ContainerStarted","Data":"bd42920caf1dc0fdb99f6bbab51adc5bea7d7685a171f6c9a23d32e2b8174673"} 
Nov 21 15:52:35 crc kubenswrapper[4967]: I1121 15:52:35.198491 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g" event={"ID":"26d40ea4-3e61-4911-b5d4-a87a06b6698e","Type":"ContainerStarted","Data":"9cc2a02090a6a9515564ecb4cf5856179ed273ad429860e6e136a7f2abf4eee3"} Nov 21 15:52:35 crc kubenswrapper[4967]: I1121 15:52:35.199984 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf" event={"ID":"ffe49522-20f7-4f17-9209-a782306baf71","Type":"ContainerStarted","Data":"6f74fe3face2d70aa6be3d13dd089c39176119aedb1c8774097b1b07c511b2a3"} Nov 21 15:52:35 crc kubenswrapper[4967]: E1121 15:52:35.201912 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-jsj2l" podUID="dce06b30-88dd-4beb-b4cb-7982ed0a8000" Nov 21 15:52:36 crc kubenswrapper[4967]: E1121 15:52:36.054361 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g" podUID="26d40ea4-3e61-4911-b5d4-a87a06b6698e" Nov 21 15:52:36 crc kubenswrapper[4967]: E1121 15:52:36.056350 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-654d9964b7-j7n2g" podUID="fba0599a-65d9-4254-b118-6527649ffb1e" Nov 21 15:52:36 crc kubenswrapper[4967]: I1121 15:52:36.211324 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g" event={"ID":"26d40ea4-3e61-4911-b5d4-a87a06b6698e","Type":"ContainerStarted","Data":"4a87dcfef316bea27070cab545fe6417c898c5ce2766a3228bcb4c7703906555"} Nov 21 15:52:36 crc kubenswrapper[4967]: E1121 15:52:36.212820 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g" podUID="26d40ea4-3e61-4911-b5d4-a87a06b6698e" Nov 21 15:52:36 crc kubenswrapper[4967]: I1121 15:52:36.214990 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-654d9964b7-j7n2g" event={"ID":"fba0599a-65d9-4254-b118-6527649ffb1e","Type":"ContainerStarted","Data":"02a35a9cc997b7156a89313ccef7864c12f792a230b8e5a59872f0df2fe90d26"} Nov 21 15:52:36 crc kubenswrapper[4967]: E1121 15:52:36.216267 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-qh6wx" podUID="39a393cc-d7e3-4d00-89a2-7fbfaea1e6d4" Nov 21 15:52:36 
crc kubenswrapper[4967]: E1121 15:52:36.216327 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d\\\"\"" pod="openstack-operators/test-operator-controller-manager-b4c496f69-wr7n5" podUID="cc2f0cb7-e6eb-41ab-a809-ff5bf52668a1" Nov 21 15:52:36 crc kubenswrapper[4967]: E1121 15:52:36.216476 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.196:5001/openstack-k8s-operators/telemetry-operator:57284e36a25ee63dd9ec7bcaee4ba1f50bec5c34\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-654d9964b7-j7n2g" podUID="fba0599a-65d9-4254-b118-6527649ffb1e" Nov 21 15:52:37 crc kubenswrapper[4967]: E1121 15:52:37.227680 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.196:5001/openstack-k8s-operators/telemetry-operator:57284e36a25ee63dd9ec7bcaee4ba1f50bec5c34\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-654d9964b7-j7n2g" podUID="fba0599a-65d9-4254-b118-6527649ffb1e" Nov 21 15:52:37 crc kubenswrapper[4967]: E1121 15:52:37.228540 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g" podUID="26d40ea4-3e61-4911-b5d4-a87a06b6698e" Nov 21 15:52:52 crc kubenswrapper[4967]: E1121 15:52:52.312979 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13" Nov 21 15:52:52 crc kubenswrapper[4967]: E1121 15:52:52.313622 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-n66gc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-54cfbf4c7d-qjbwj_openstack-operators(8d84a26e-2d5b-41e3-95fa-60e2bdc67b0d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 15:52:56 crc kubenswrapper[4967]: E1121 15:52:56.334209 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:c6405d94e56b40ef669729216ab4b9c441f34bb280902efa2940038c076b560f" Nov 21 15:52:56 crc kubenswrapper[4967]: E1121 15:52:56.334873 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:c6405d94e56b40ef669729216ab4b9c441f34bb280902efa2940038c076b560f,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2rqzp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-767ccfd65f-c8rct_openstack-operators(8853dca4-97bf-4b91-9523-a383122bd470): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 15:52:56 crc kubenswrapper[4967]: E1121 15:52:56.751752 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:5edd825a235f5784d9a65892763c5388c39df1731d0fcbf4ee33408b8c83ac96" Nov 21 15:52:56 crc kubenswrapper[4967]: E1121 15:52:56.752147 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:5edd825a235f5784d9a65892763c5388c39df1731d0fcbf4ee33408b8c83ac96,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5q5lm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-56f54d6746-bxhgh_openstack-operators(fcb67210-f9d3-483b-aa07-6f332130450c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 15:52:58 crc kubenswrapper[4967]: E1121 15:52:58.573829 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:7b90521b9e9cb4eb43c2f1c3bf85dbd068d684315f4f705b07708dd078df9d04" Nov 21 15:52:58 crc kubenswrapper[4967]: E1121 15:52:58.574018 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:7b90521b9e9cb4eb43c2f1c3bf85dbd068d684315f4f705b07708dd078df9d04,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qm4fc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-54b5986bb8-xjrxl_openstack-operators(95150f6f-2cf8-490f-a9fe-c01038ca1807): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 15:52:58 crc kubenswrapper[4967]: E1121 15:52:58.996142 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0" Nov 21 15:52:58 crc kubenswrapper[4967]: E1121 15:52:58.997794 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cff9g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d656998f4-f78mk_openstack-operators(51e2d793-61c7-4587-ac51-fb644591ef74): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 15:53:01 crc kubenswrapper[4967]: E1121 15:53:01.035855 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894" Nov 21 15:53:01 crc kubenswrapper[4967]: E1121 15:53:01.036339 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dd74h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-6dd8864d7c-9rqgf_openstack-operators(ffe49522-20f7-4f17-9209-a782306baf71): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 15:53:03 crc kubenswrapper[4967]: I1121 15:53:03.451882 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-648ff6d765-v2pj5" event={"ID":"cb33f2a5-e4b0-4ebf-9ddb-03979139e785","Type":"ContainerStarted","Data":"bcfb2bc05b5189339414d4a9813edbf7532d941fb8e5e095244990328bfbf877"} Nov 21 15:53:07 crc kubenswrapper[4967]: E1121 15:53:07.636020 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Nov 21 15:53:07 crc kubenswrapper[4967]: E1121 15:53:07.636685 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sb4sn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-5f97d8c699-mm6fj_openstack-operators(e107a04d-5715-481d-94d7-b99ad7f3e95d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 15:53:07 crc kubenswrapper[4967]: E1121 15:53:07.638736 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-mm6fj" podUID="e107a04d-5715-481d-94d7-b99ad7f3e95d" Nov 21 15:53:08 crc kubenswrapper[4967]: E1121 15:53:08.292586 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qjbwj" podUID="8d84a26e-2d5b-41e3-95fa-60e2bdc67b0d" Nov 21 15:53:08 crc kubenswrapper[4967]: I1121 15:53:08.490878 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qjbwj" event={"ID":"8d84a26e-2d5b-41e3-95fa-60e2bdc67b0d","Type":"ContainerStarted","Data":"6598b9a236d81f83e99b4588af1a975c24a929ef3f8eff719e8072a22d80ee8a"} Nov 21 15:53:08 crc kubenswrapper[4967]: E1121 15:53:08.492145 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-mm6fj" podUID="e107a04d-5715-481d-94d7-b99ad7f3e95d" Nov 21 15:53:08 crc kubenswrapper[4967]: E1121 15:53:08.589445 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-bxhgh" podUID="fcb67210-f9d3-483b-aa07-6f332130450c" Nov 21 15:53:08 crc kubenswrapper[4967]: E1121 15:53:08.650970 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-c8rct" podUID="8853dca4-97bf-4b91-9523-a383122bd470" Nov 21 15:53:08 crc kubenswrapper[4967]: E1121 15:53:08.675295 4967 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-d656998f4-f78mk" podUID="51e2d793-61c7-4587-ac51-fb644591ef74" Nov 21 15:53:08 crc kubenswrapper[4967]: E1121 15:53:08.743871 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-xjrxl" podUID="95150f6f-2cf8-490f-a9fe-c01038ca1807" Nov 21 15:53:08 crc kubenswrapper[4967]: E1121 15:53:08.855213 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf" podUID="ffe49522-20f7-4f17-9209-a782306baf71" Nov 21 15:53:09 crc kubenswrapper[4967]: I1121 15:53:09.501327 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58f887965d-9m8rh" event={"ID":"43bebf9f-0691-416d-91e2-232a3a4d37d8","Type":"ContainerStarted","Data":"89593c1cf6fa3cc77719082a471a51f5d3932ee8acc41d14e01bad4d4237a7d2"} Nov 21 15:53:09 crc kubenswrapper[4967]: I1121 15:53:09.502705 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d656998f4-f78mk" event={"ID":"51e2d793-61c7-4587-ac51-fb644591ef74","Type":"ContainerStarted","Data":"a47c2932c175eda41b318b11bb4688d9fdfc7ad9b19132b23b7a1e0cc35883d0"} Nov 21 15:53:09 crc kubenswrapper[4967]: I1121 15:53:09.505554 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-b8f9d" event={"ID":"e8ef978f-0513-4008-a8f5-07c52a1979bb","Type":"ContainerStarted","Data":"7491ea5d53acdeb32d321d1166b12b39db0bacdbb05ce7d2dd95a4df472c4696"} Nov 21 15:53:09 crc kubenswrapper[4967]: I1121 15:53:09.523937 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-kvg22" event={"ID":"5d919036-74e8-4637-b93e-fefc337cf51a","Type":"ContainerStarted","Data":"3e3df2efe47c0b268c879349da3a07849d6e9d6bf28800718c83ed5f402283b7"} Nov 21 15:53:09 crc kubenswrapper[4967]: I1121 15:53:09.533702 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-44jql" event={"ID":"9782f058-db28-4c8b-b1b7-ee270c4d76b4","Type":"ContainerStarted","Data":"2a64b94e8e329dd8d0961542ceb648d1a133efebd38db0ed5d6de606e87a1571"} Nov 21 15:53:09 crc kubenswrapper[4967]: I1121 15:53:09.538481 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-c8rct" event={"ID":"8853dca4-97bf-4b91-9523-a383122bd470","Type":"ContainerStarted","Data":"ec6d7e703ff27e59856c0b8cdfccc24f5faedcd6a148cb0c2f5dfd203cefef37"} Nov 21 15:53:09 crc kubenswrapper[4967]: I1121 15:53:09.540337 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-jt8hb" event={"ID":"7f1b9439-5d7e-462e-b4ce-2cfa70363101","Type":"ContainerStarted","Data":"38d7958da335fe8c4586eb97c1cb4545340a45d413f22c0522998efb37daa138"} Nov 21 15:53:09 crc 
kubenswrapper[4967]: I1121 15:53:09.543881 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-bxhgh" event={"ID":"fcb67210-f9d3-483b-aa07-6f332130450c","Type":"ContainerStarted","Data":"5b9c23d6290df68d3b6b8857961c922ab18810a3b7f827155551e940821a7c94"} Nov 21 15:53:09 crc kubenswrapper[4967]: I1121 15:53:09.563683 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-648ff6d765-v2pj5" event={"ID":"cb33f2a5-e4b0-4ebf-9ddb-03979139e785","Type":"ContainerStarted","Data":"497e4e743b6a765ba0b7c0a0f1fef595a704cc344fd1e4b624c05a33cb8ae57c"} Nov 21 15:53:09 crc kubenswrapper[4967]: I1121 15:53:09.565444 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-648ff6d765-v2pj5" Nov 21 15:53:09 crc kubenswrapper[4967]: I1121 15:53:09.574904 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-648ff6d765-v2pj5" Nov 21 15:53:09 crc kubenswrapper[4967]: I1121 15:53:09.584649 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf" event={"ID":"ffe49522-20f7-4f17-9209-a782306baf71","Type":"ContainerStarted","Data":"1123a08368d482449f6ea71b7aa66bd5d96e2ddc560d3d8f78adee3e0ed505c4"} Nov 21 15:53:09 crc kubenswrapper[4967]: I1121 15:53:09.599805 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-65jcd" event={"ID":"d1b75c6e-f666-4046-8f22-9a6fd96f9442","Type":"ContainerStarted","Data":"dffece73e67cf477cacbd880344950be1953b88bae8f67ada7c45ec3b68e613e"} Nov 21 15:53:09 crc kubenswrapper[4967]: I1121 15:53:09.611095 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-xjrxl" event={"ID":"95150f6f-2cf8-490f-a9fe-c01038ca1807","Type":"ContainerStarted","Data":"73bb596f3afa0e526e8616fd69b6395c58650f00358cf7f0e145a1528cda614a"} Nov 21 15:53:09 crc kubenswrapper[4967]: I1121 15:53:09.625386 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-648ff6d765-v2pj5" podStartSLOduration=38.625363332 podStartE2EDuration="38.625363332s" podCreationTimestamp="2025-11-21 15:52:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:53:09.607679005 +0000 UTC m=+1077.866200013" watchObservedRunningTime="2025-11-21 15:53:09.625363332 +0000 UTC m=+1077.883884340" Nov 21 15:53:09 crc kubenswrapper[4967]: I1121 15:53:09.628920 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-dvhqb" event={"ID":"bb306581-4364-431e-866d-49a92b74eab5","Type":"ContainerStarted","Data":"eea9dc7e23c5c8141526d78e24684dccbd53c4878f5217d0addbcac8641a035e"} Nov 21 15:53:09 crc kubenswrapper[4967]: I1121 15:53:09.638442 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-dcz62" event={"ID":"0b76a724-2c3b-47e1-a6bd-daada9e96cea","Type":"ContainerStarted","Data":"e2ab4e1e554af7e0fb50210489af3e158281d52ca445be792e2955bac1496b56"} Nov 21 15:53:10 crc kubenswrapper[4967]: E1121 15:53:10.595783 4967 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894\\\"\"" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf" podUID="ffe49522-20f7-4f17-9209-a782306baf71" Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.657888 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-b4c496f69-wr7n5" event={"ID":"cc2f0cb7-e6eb-41ab-a809-ff5bf52668a1","Type":"ContainerStarted","Data":"d6aa365e03514c121ecfc6e8eaa0e80ddd765693a9e0cfc2afcd8c70fefbd804"} Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.658936 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-b4c496f69-wr7n5" Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.665918 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-44jql" event={"ID":"9782f058-db28-4c8b-b1b7-ee270c4d76b4","Type":"ContainerStarted","Data":"c1cdbfb5f84be9635e350b5db0b3c7fec1f6590f0415c48765682081acee432f"} Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.666713 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-44jql" Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.669158 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-654d9964b7-j7n2g" event={"ID":"fba0599a-65d9-4254-b118-6527649ffb1e","Type":"ContainerStarted","Data":"e9b5a1c4ca0ab681309787a40f1007e54e6f7a66573cec897fc765b5bd5fa889"} Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.669568 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-654d9964b7-j7n2g" Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.680468 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7969689c84-lrlvb" event={"ID":"680de92b-f127-4cb3-86c4-3e4b9ae183df","Type":"ContainerStarted","Data":"f9f804b372ec0865d06a574511e783b0ad2c4fb2d355d1b5b47d278567458f58"} Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.694746 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-qh6wx" event={"ID":"39a393cc-d7e3-4d00-89a2-7fbfaea1e6d4","Type":"ContainerStarted","Data":"4613b0c4abbdec956e4aa3367561f7240af0e56d6b7ff121b00c8e2a228cf42d"} Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.695620 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-qh6wx" Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.698520 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-b4c496f69-wr7n5" podStartSLOduration=5.52731841 podStartE2EDuration="39.698502768s" podCreationTimestamp="2025-11-21 15:52:31 +0000 UTC" firstStartedPulling="2025-11-21 15:52:34.072750581 +0000 UTC m=+1042.331271589" lastFinishedPulling="2025-11-21 15:53:08.243934939 +0000 UTC m=+1076.502455947" observedRunningTime="2025-11-21 15:53:10.695827931 +0000 UTC m=+1078.954348939" 
watchObservedRunningTime="2025-11-21 15:53:10.698502768 +0000 UTC m=+1078.957023766" Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.703259 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-jt8hb" event={"ID":"7f1b9439-5d7e-462e-b4ce-2cfa70363101","Type":"ContainerStarted","Data":"3a67ad2eaa3ef203afbd3b30ae3590381f26349c41b015ade315beb99bd5444a"} Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.704154 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-jt8hb" Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.706557 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g" event={"ID":"26d40ea4-3e61-4911-b5d4-a87a06b6698e","Type":"ContainerStarted","Data":"b1d10d8911bf0448bd93a2ec1aeae01cec3ce8fa620ecd061f0d85f741544c11"} Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.707378 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g" Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.725213 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-pmb82" event={"ID":"c8665113-6713-4abd-8d58-66c16f2d678a","Type":"ContainerStarted","Data":"fe901ba39d578c94604c2c695b92c8afd16006d86738bf3e0cf16e5614e97d05"} Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.727006 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-qh6wx" podStartSLOduration=5.451180875 podStartE2EDuration="39.726984295s" podCreationTimestamp="2025-11-21 15:52:31 +0000 UTC" firstStartedPulling="2025-11-21 15:52:34.043767879 +0000 UTC m=+1042.302288887" lastFinishedPulling="2025-11-21 15:53:08.319571299 +0000 UTC m=+1076.578092307" observedRunningTime="2025-11-21 15:53:10.718422719 +0000 UTC m=+1078.976943747" watchObservedRunningTime="2025-11-21 15:53:10.726984295 +0000 UTC m=+1078.985505303" Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.748111 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-jsj2l" event={"ID":"dce06b30-88dd-4beb-b4cb-7982ed0a8000","Type":"ContainerStarted","Data":"c6ec3aa927b96120f095207054828de898b051f1599d4fad8c03c87032cbc3d0"} Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.762889 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-654d9964b7-j7n2g" podStartSLOduration=5.515208873 podStartE2EDuration="39.762869965s" podCreationTimestamp="2025-11-21 15:52:31 +0000 UTC" firstStartedPulling="2025-11-21 15:52:34.06503249 +0000 UTC m=+1042.323553498" lastFinishedPulling="2025-11-21 15:53:08.312693582 +0000 UTC m=+1076.571214590" observedRunningTime="2025-11-21 15:53:10.758143609 +0000 UTC m=+1079.016664617" watchObservedRunningTime="2025-11-21 15:53:10.762869965 +0000 UTC m=+1079.021390993" Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.805627 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-44jql" podStartSLOduration=9.447721387 podStartE2EDuration="40.805602681s" podCreationTimestamp="2025-11-21 
15:52:30 +0000 UTC" firstStartedPulling="2025-11-21 15:52:33.441081324 +0000 UTC m=+1041.699602332" lastFinishedPulling="2025-11-21 15:53:04.798962618 +0000 UTC m=+1073.057483626" observedRunningTime="2025-11-21 15:53:10.796962493 +0000 UTC m=+1079.055483521" watchObservedRunningTime="2025-11-21 15:53:10.805602681 +0000 UTC m=+1079.064123689" Nov 21 15:53:10 crc kubenswrapper[4967]: E1121 15:53:10.854685 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894\\\"\"" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf" podUID="ffe49522-20f7-4f17-9209-a782306baf71" Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.883570 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g" podStartSLOduration=6.655438637 podStartE2EDuration="40.883546788s" podCreationTimestamp="2025-11-21 15:52:30 +0000 UTC" firstStartedPulling="2025-11-21 15:52:34.050883464 +0000 UTC m=+1042.309404472" lastFinishedPulling="2025-11-21 15:53:08.278991625 +0000 UTC m=+1076.537512623" observedRunningTime="2025-11-21 15:53:10.849071469 +0000 UTC m=+1079.107592477" watchObservedRunningTime="2025-11-21 15:53:10.883546788 +0000 UTC m=+1079.142067796" Nov 21 15:53:10 crc kubenswrapper[4967]: I1121 15:53:10.981970 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-jt8hb" podStartSLOduration=9.691839612999999 podStartE2EDuration="40.981942342s" podCreationTimestamp="2025-11-21 15:52:30 +0000 UTC" firstStartedPulling="2025-11-21 15:52:33.508649123 +0000 UTC m=+1041.767170131" lastFinishedPulling="2025-11-21 15:53:04.798751852 +0000 UTC m=+1073.057272860" observedRunningTime="2025-11-21 15:53:10.967979071 +0000 UTC m=+1079.226500089" watchObservedRunningTime="2025-11-21 15:53:10.981942342 +0000 UTC m=+1079.240463350" Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.005869 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-jsj2l" podStartSLOduration=5.327831855 podStartE2EDuration="40.005851178s" podCreationTimestamp="2025-11-21 15:52:31 +0000 UTC" firstStartedPulling="2025-11-21 15:52:33.508804057 +0000 UTC m=+1041.767325065" lastFinishedPulling="2025-11-21 15:53:08.18682338 +0000 UTC m=+1076.445344388" observedRunningTime="2025-11-21 15:53:10.995667456 +0000 UTC m=+1079.254188464" watchObservedRunningTime="2025-11-21 15:53:11.005851178 +0000 UTC m=+1079.264372186" Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.758018 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-kvg22" event={"ID":"5d919036-74e8-4637-b93e-fefc337cf51a","Type":"ContainerStarted","Data":"5ee8ec2efb171a261bb292b3f8f2abf7d27dbe114ab3a3f3642e1ae8d23ed13b"} Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.758382 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-kvg22" Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.761620 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/manila-operator-controller-manager-58f887965d-9m8rh" event={"ID":"43bebf9f-0691-416d-91e2-232a3a4d37d8","Type":"ContainerStarted","Data":"ca2a1ea0b49f4d8df967f8a6f2fc112581b3277932c125064f9ae1bf16b0b46b"} Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.762207 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58f887965d-9m8rh" Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.763929 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-b8f9d" event={"ID":"e8ef978f-0513-4008-a8f5-07c52a1979bb","Type":"ContainerStarted","Data":"991d895382b173a0a90590108fece60cad15c95458b6773dc9de578e554f1e64"} Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.764373 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-b8f9d" Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.766111 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7969689c84-lrlvb" event={"ID":"680de92b-f127-4cb3-86c4-3e4b9ae183df","Type":"ContainerStarted","Data":"d2bcf716302f324d884829506fc41a5c92bb9757171bf7ddf63f520745047048"} Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.766233 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-7969689c84-lrlvb" Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.767724 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-dvhqb" event={"ID":"bb306581-4364-431e-866d-49a92b74eab5","Type":"ContainerStarted","Data":"c102745e3df18e2c73417b31c417b836ddfc4e851759bfc819e69a52e3a086f1"} Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.768163 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-dvhqb" Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.769937 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-65jcd" event={"ID":"d1b75c6e-f666-4046-8f22-9a6fd96f9442","Type":"ContainerStarted","Data":"ec2469a64509b6b998145bafd5c54e18cdc25c58ffccb7e78c4e394fba5e4387"} Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.770409 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-65jcd" Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.773235 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-pmb82" event={"ID":"c8665113-6713-4abd-8d58-66c16f2d678a","Type":"ContainerStarted","Data":"19f06aa9ec76bdc9840ef3a3890544bac46be8cc3a49cb75b5bcd86e1ebad454"} Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.773671 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-pmb82" Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.775805 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qjbwj" 
event={"ID":"8d84a26e-2d5b-41e3-95fa-60e2bdc67b0d","Type":"ContainerStarted","Data":"b6382f2122ce331e871faab97f2e31eca1eceb36313b7047b0705d2d8f20e96d"} Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.776232 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qjbwj" Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.778600 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-dcz62" event={"ID":"0b76a724-2c3b-47e1-a6bd-daada9e96cea","Type":"ContainerStarted","Data":"9de0d6231d007ad59d5bd5af35cd8ab873eb836d5eefa9e5b5ff9bec788ab179"} Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.780653 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-kvg22" podStartSLOduration=8.029614482 podStartE2EDuration="41.780641522s" podCreationTimestamp="2025-11-21 15:52:30 +0000 UTC" firstStartedPulling="2025-11-21 15:52:33.315562312 +0000 UTC m=+1041.574083310" lastFinishedPulling="2025-11-21 15:53:07.066589342 +0000 UTC m=+1075.325110350" observedRunningTime="2025-11-21 15:53:11.779514 +0000 UTC m=+1080.038035018" watchObservedRunningTime="2025-11-21 15:53:11.780641522 +0000 UTC m=+1080.039162530" Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.858099 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-58f887965d-9m8rh" podStartSLOduration=9.504150577 podStartE2EDuration="41.858078375s" podCreationTimestamp="2025-11-21 15:52:30 +0000 UTC" firstStartedPulling="2025-11-21 15:52:33.115063918 +0000 UTC m=+1041.373584926" lastFinishedPulling="2025-11-21 15:53:05.468991716 +0000 UTC m=+1073.727512724" observedRunningTime="2025-11-21 15:53:11.807108952 +0000 UTC m=+1080.065629950" watchObservedRunningTime="2025-11-21 15:53:11.858078375 +0000 UTC m=+1080.116599383" Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.860903 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-7969689c84-lrlvb" podStartSLOduration=8.771195102 podStartE2EDuration="41.860895725s" podCreationTimestamp="2025-11-21 15:52:30 +0000 UTC" firstStartedPulling="2025-11-21 15:52:32.379292623 +0000 UTC m=+1040.637813631" lastFinishedPulling="2025-11-21 15:53:05.468993246 +0000 UTC m=+1073.727514254" observedRunningTime="2025-11-21 15:53:11.852104123 +0000 UTC m=+1080.110625131" watchObservedRunningTime="2025-11-21 15:53:11.860895725 +0000 UTC m=+1080.119416733" Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.880429 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-dvhqb" podStartSLOduration=12.445581157 podStartE2EDuration="41.880409775s" podCreationTimestamp="2025-11-21 15:52:30 +0000 UTC" firstStartedPulling="2025-11-21 15:52:33.428655597 +0000 UTC m=+1041.687176605" lastFinishedPulling="2025-11-21 15:53:02.863484215 +0000 UTC m=+1071.122005223" observedRunningTime="2025-11-21 15:53:11.878647715 +0000 UTC m=+1080.137168713" watchObservedRunningTime="2025-11-21 15:53:11.880409775 +0000 UTC m=+1080.138930783" Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.911667 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qjbwj" podStartSLOduration=4.645075845 podStartE2EDuration="41.911654102s" podCreationTimestamp="2025-11-21 15:52:30 +0000 UTC" firstStartedPulling="2025-11-21 15:52:33.429558613 +0000 UTC m=+1041.688079621" lastFinishedPulling="2025-11-21 15:53:10.69613687 +0000 UTC m=+1078.954657878" observedRunningTime="2025-11-21 15:53:11.903268201 +0000 UTC m=+1080.161789209" watchObservedRunningTime="2025-11-21 15:53:11.911654102 +0000 UTC m=+1080.170175110" Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.938412 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-pmb82" podStartSLOduration=11.298466748 podStartE2EDuration="41.938392109s" podCreationTimestamp="2025-11-21 15:52:30 +0000 UTC" firstStartedPulling="2025-11-21 15:52:32.224197892 +0000 UTC m=+1040.482718900" lastFinishedPulling="2025-11-21 15:53:02.864123263 +0000 UTC m=+1071.122644261" observedRunningTime="2025-11-21 15:53:11.93526499 +0000 UTC m=+1080.193785998" watchObservedRunningTime="2025-11-21 15:53:11.938392109 +0000 UTC m=+1080.196913137" Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.978811 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-dcz62" podStartSLOduration=10.767231414 podStartE2EDuration="41.978795869s" podCreationTimestamp="2025-11-21 15:52:30 +0000 UTC" firstStartedPulling="2025-11-21 15:52:33.59393406 +0000 UTC m=+1041.852455068" lastFinishedPulling="2025-11-21 15:53:04.805498515 +0000 UTC m=+1073.064019523" observedRunningTime="2025-11-21 15:53:11.97812126 +0000 UTC m=+1080.236642258" watchObservedRunningTime="2025-11-21 15:53:11.978795869 +0000 UTC m=+1080.237316877" Nov 21 15:53:11 crc kubenswrapper[4967]: I1121 15:53:11.997784 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-jsj2l" Nov 21 15:53:12 crc kubenswrapper[4967]: I1121 15:53:12.044513 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-b8f9d" podStartSLOduration=7.480294065 podStartE2EDuration="41.044497374s" podCreationTimestamp="2025-11-21 15:52:31 +0000 UTC" firstStartedPulling="2025-11-21 15:52:33.502506136 +0000 UTC m=+1041.761027144" lastFinishedPulling="2025-11-21 15:53:07.066709445 +0000 UTC m=+1075.325230453" observedRunningTime="2025-11-21 15:53:12.010643963 +0000 UTC m=+1080.269164991" watchObservedRunningTime="2025-11-21 15:53:12.044497374 +0000 UTC m=+1080.303018372" Nov 21 15:53:12 crc kubenswrapper[4967]: I1121 15:53:12.046046 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-65jcd" podStartSLOduration=12.548729048 podStartE2EDuration="42.046035739s" podCreationTimestamp="2025-11-21 15:52:30 +0000 UTC" firstStartedPulling="2025-11-21 15:52:33.366686379 +0000 UTC m=+1041.625207387" lastFinishedPulling="2025-11-21 15:53:02.86399307 +0000 UTC m=+1071.122514078" observedRunningTime="2025-11-21 15:53:12.041401876 +0000 UTC m=+1080.299922884" watchObservedRunningTime="2025-11-21 15:53:12.046035739 +0000 UTC m=+1080.304556747" Nov 21 15:53:12 crc kubenswrapper[4967]: I1121 15:53:12.797749 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/swift-operator-controller-manager-d656998f4-f78mk" event={"ID":"51e2d793-61c7-4587-ac51-fb644591ef74","Type":"ContainerStarted","Data":"6bc34bc27c2f27d2a9cd47c79bc96672bf936e6d26f04f18ee592d98ff27727e"} Nov 21 15:53:12 crc kubenswrapper[4967]: I1121 15:53:12.797914 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d656998f4-f78mk" Nov 21 15:53:12 crc kubenswrapper[4967]: I1121 15:53:12.801094 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-bxhgh" event={"ID":"fcb67210-f9d3-483b-aa07-6f332130450c","Type":"ContainerStarted","Data":"4fb82e463e628ee06cb61ef4ba20d7ebaafa5fc14b9e2e86749df02f6e40f521"} Nov 21 15:53:12 crc kubenswrapper[4967]: I1121 15:53:12.801823 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-bxhgh" Nov 21 15:53:12 crc kubenswrapper[4967]: I1121 15:53:12.804984 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-xjrxl" event={"ID":"95150f6f-2cf8-490f-a9fe-c01038ca1807","Type":"ContainerStarted","Data":"5fd8fb3587a473ddd42e8f03d06fd35de59676225b0897c28655afc6e4420efb"} Nov 21 15:53:12 crc kubenswrapper[4967]: I1121 15:53:12.805484 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-xjrxl" Nov 21 15:53:12 crc kubenswrapper[4967]: I1121 15:53:12.818181 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-c8rct" event={"ID":"8853dca4-97bf-4b91-9523-a383122bd470","Type":"ContainerStarted","Data":"1b6fe24b8eac63018a70b984e4474a2785e53f5ef85d1164a8e49bfb45251841"} Nov 21 15:53:12 crc kubenswrapper[4967]: I1121 15:53:12.818238 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-c8rct" Nov 21 15:53:12 crc kubenswrapper[4967]: I1121 15:53:12.818735 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-dcz62" Nov 21 15:53:12 crc kubenswrapper[4967]: I1121 15:53:12.860697 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-d656998f4-f78mk" podStartSLOduration=4.600650219 podStartE2EDuration="41.860674867s" podCreationTimestamp="2025-11-21 15:52:31 +0000 UTC" firstStartedPulling="2025-11-21 15:52:34.037608393 +0000 UTC m=+1042.296129401" lastFinishedPulling="2025-11-21 15:53:11.297633041 +0000 UTC m=+1079.556154049" observedRunningTime="2025-11-21 15:53:12.825843587 +0000 UTC m=+1081.084364595" watchObservedRunningTime="2025-11-21 15:53:12.860674867 +0000 UTC m=+1081.119195875" Nov 21 15:53:12 crc kubenswrapper[4967]: I1121 15:53:12.884337 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-c8rct" podStartSLOduration=5.01911108 podStartE2EDuration="42.884301165s" podCreationTimestamp="2025-11-21 15:52:30 +0000 UTC" firstStartedPulling="2025-11-21 15:52:33.440702193 +0000 UTC m=+1041.699223201" lastFinishedPulling="2025-11-21 15:53:11.305892278 +0000 UTC m=+1079.564413286" observedRunningTime="2025-11-21 15:53:12.859775861 +0000 UTC 
m=+1081.118296879" watchObservedRunningTime="2025-11-21 15:53:12.884301165 +0000 UTC m=+1081.142822173" Nov 21 15:53:12 crc kubenswrapper[4967]: I1121 15:53:12.887746 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-xjrxl" podStartSLOduration=5.030691621 podStartE2EDuration="42.887735643s" podCreationTimestamp="2025-11-21 15:52:30 +0000 UTC" firstStartedPulling="2025-11-21 15:52:33.440460126 +0000 UTC m=+1041.698981134" lastFinishedPulling="2025-11-21 15:53:11.297504148 +0000 UTC m=+1079.556025156" observedRunningTime="2025-11-21 15:53:12.884082368 +0000 UTC m=+1081.142603376" watchObservedRunningTime="2025-11-21 15:53:12.887735643 +0000 UTC m=+1081.146256661" Nov 21 15:53:12 crc kubenswrapper[4967]: I1121 15:53:12.912005 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-bxhgh" podStartSLOduration=4.991167277 podStartE2EDuration="42.911986869s" podCreationTimestamp="2025-11-21 15:52:30 +0000 UTC" firstStartedPulling="2025-11-21 15:52:33.385518229 +0000 UTC m=+1041.644039237" lastFinishedPulling="2025-11-21 15:53:11.306337821 +0000 UTC m=+1079.564858829" observedRunningTime="2025-11-21 15:53:12.903948309 +0000 UTC m=+1081.162469327" watchObservedRunningTime="2025-11-21 15:53:12.911986869 +0000 UTC m=+1081.170507877" Nov 21 15:53:13 crc kubenswrapper[4967]: I1121 15:53:13.825501 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-dvhqb" Nov 21 15:53:13 crc kubenswrapper[4967]: I1121 15:53:13.825552 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-65jcd" Nov 21 15:53:13 crc kubenswrapper[4967]: I1121 15:53:13.826432 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-dcz62" Nov 21 15:53:20 crc kubenswrapper[4967]: I1121 15:53:20.941889 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-pmb82" Nov 21 15:53:21 crc kubenswrapper[4967]: I1121 15:53:21.070195 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-c8rct" Nov 21 15:53:21 crc kubenswrapper[4967]: I1121 15:53:21.092061 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-7969689c84-lrlvb" Nov 21 15:53:21 crc kubenswrapper[4967]: I1121 15:53:21.417148 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-44jql" Nov 21 15:53:21 crc kubenswrapper[4967]: I1121 15:53:21.471518 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-bxhgh" Nov 21 15:53:21 crc kubenswrapper[4967]: I1121 15:53:21.474941 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-kvg22" Nov 21 15:53:21 crc kubenswrapper[4967]: I1121 15:53:21.483762 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/manila-operator-controller-manager-58f887965d-9m8rh" Nov 21 15:53:21 crc kubenswrapper[4967]: I1121 15:53:21.520684 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-xjrxl" Nov 21 15:53:21 crc kubenswrapper[4967]: I1121 15:53:21.649602 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-qjbwj" Nov 21 15:53:21 crc kubenswrapper[4967]: I1121 15:53:21.773053 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-jt8hb" Nov 21 15:53:22 crc kubenswrapper[4967]: I1121 15:53:22.000524 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-jsj2l" Nov 21 15:53:22 crc kubenswrapper[4967]: I1121 15:53:22.046195 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d656998f4-f78mk" Nov 21 15:53:22 crc kubenswrapper[4967]: I1121 15:53:22.103254 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-b8f9d" Nov 21 15:53:22 crc kubenswrapper[4967]: I1121 15:53:22.164686 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-654d9964b7-j7n2g" Nov 21 15:53:22 crc kubenswrapper[4967]: I1121 15:53:22.196324 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-b4c496f69-wr7n5" Nov 21 15:53:22 crc kubenswrapper[4967]: I1121 15:53:22.238093 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-8c6448b9f-qh6wx" Nov 21 15:53:22 crc kubenswrapper[4967]: I1121 15:53:22.549431 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-xq76g" Nov 21 15:53:22 crc kubenswrapper[4967]: I1121 15:53:22.894073 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-mm6fj" event={"ID":"e107a04d-5715-481d-94d7-b99ad7f3e95d","Type":"ContainerStarted","Data":"576b3bf700ac2e3fa9fdc8388f9e347730dad2bf9b299cb0632bde1a5a3b4a05"} Nov 21 15:53:22 crc kubenswrapper[4967]: I1121 15:53:22.907029 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-mm6fj" podStartSLOduration=3.692393734 podStartE2EDuration="51.907009746s" podCreationTimestamp="2025-11-21 15:52:31 +0000 UTC" firstStartedPulling="2025-11-21 15:52:33.987794103 +0000 UTC m=+1042.246315111" lastFinishedPulling="2025-11-21 15:53:22.202410115 +0000 UTC m=+1090.460931123" observedRunningTime="2025-11-21 15:53:22.906048358 +0000 UTC m=+1091.164569376" watchObservedRunningTime="2025-11-21 15:53:22.907009746 +0000 UTC m=+1091.165530754" Nov 21 15:53:23 crc kubenswrapper[4967]: I1121 15:53:23.905414 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf" 
event={"ID":"ffe49522-20f7-4f17-9209-a782306baf71","Type":"ContainerStarted","Data":"7f3b1bed40020a9e30f5274d81933179f15e9704aeb6f0ff2316edaaa9650410"} Nov 21 15:53:23 crc kubenswrapper[4967]: I1121 15:53:23.905985 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf" Nov 21 15:53:23 crc kubenswrapper[4967]: I1121 15:53:23.930663 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf" podStartSLOduration=5.134956323 podStartE2EDuration="53.930642411s" podCreationTimestamp="2025-11-21 15:52:30 +0000 UTC" firstStartedPulling="2025-11-21 15:52:34.189040598 +0000 UTC m=+1042.447561606" lastFinishedPulling="2025-11-21 15:53:22.984726686 +0000 UTC m=+1091.243247694" observedRunningTime="2025-11-21 15:53:23.923490286 +0000 UTC m=+1092.182011304" watchObservedRunningTime="2025-11-21 15:53:23.930642411 +0000 UTC m=+1092.189163419" Nov 21 15:53:32 crc kubenswrapper[4967]: I1121 15:53:32.930296 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-9rqgf" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.197603 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-kf527"] Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.201551 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-kf527" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.203917 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.204092 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.205776 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-wpvxb" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.212059 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-kf527"] Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.218261 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.296652 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-wnnsx"] Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.301058 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-wnnsx" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.306848 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.326422 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-wnnsx"] Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.359208 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a29d511-33be-481c-93d5-2bacc9cabf22-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-wnnsx\" (UID: \"3a29d511-33be-481c-93d5-2bacc9cabf22\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wnnsx" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.359304 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rfqg2\" (UniqueName: \"kubernetes.io/projected/1909ee8f-a585-43c9-809f-7cc80c5ec524-kube-api-access-rfqg2\") pod \"dnsmasq-dns-675f4bcbfc-kf527\" (UID: \"1909ee8f-a585-43c9-809f-7cc80c5ec524\") " pod="openstack/dnsmasq-dns-675f4bcbfc-kf527" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.359357 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1909ee8f-a585-43c9-809f-7cc80c5ec524-config\") pod \"dnsmasq-dns-675f4bcbfc-kf527\" (UID: \"1909ee8f-a585-43c9-809f-7cc80c5ec524\") " pod="openstack/dnsmasq-dns-675f4bcbfc-kf527" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.360069 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96cs7\" (UniqueName: \"kubernetes.io/projected/3a29d511-33be-481c-93d5-2bacc9cabf22-kube-api-access-96cs7\") pod \"dnsmasq-dns-78dd6ddcc-wnnsx\" (UID: \"3a29d511-33be-481c-93d5-2bacc9cabf22\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wnnsx" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.360220 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a29d511-33be-481c-93d5-2bacc9cabf22-config\") pod \"dnsmasq-dns-78dd6ddcc-wnnsx\" (UID: \"3a29d511-33be-481c-93d5-2bacc9cabf22\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wnnsx" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.461691 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96cs7\" (UniqueName: \"kubernetes.io/projected/3a29d511-33be-481c-93d5-2bacc9cabf22-kube-api-access-96cs7\") pod \"dnsmasq-dns-78dd6ddcc-wnnsx\" (UID: \"3a29d511-33be-481c-93d5-2bacc9cabf22\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wnnsx" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.461850 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a29d511-33be-481c-93d5-2bacc9cabf22-config\") pod \"dnsmasq-dns-78dd6ddcc-wnnsx\" (UID: \"3a29d511-33be-481c-93d5-2bacc9cabf22\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wnnsx" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.461920 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a29d511-33be-481c-93d5-2bacc9cabf22-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-wnnsx\" (UID: \"3a29d511-33be-481c-93d5-2bacc9cabf22\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wnnsx" Nov 21 
15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.462018 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfqg2\" (UniqueName: \"kubernetes.io/projected/1909ee8f-a585-43c9-809f-7cc80c5ec524-kube-api-access-rfqg2\") pod \"dnsmasq-dns-675f4bcbfc-kf527\" (UID: \"1909ee8f-a585-43c9-809f-7cc80c5ec524\") " pod="openstack/dnsmasq-dns-675f4bcbfc-kf527" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.462064 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1909ee8f-a585-43c9-809f-7cc80c5ec524-config\") pod \"dnsmasq-dns-675f4bcbfc-kf527\" (UID: \"1909ee8f-a585-43c9-809f-7cc80c5ec524\") " pod="openstack/dnsmasq-dns-675f4bcbfc-kf527" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.463359 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1909ee8f-a585-43c9-809f-7cc80c5ec524-config\") pod \"dnsmasq-dns-675f4bcbfc-kf527\" (UID: \"1909ee8f-a585-43c9-809f-7cc80c5ec524\") " pod="openstack/dnsmasq-dns-675f4bcbfc-kf527" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.463416 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a29d511-33be-481c-93d5-2bacc9cabf22-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-wnnsx\" (UID: \"3a29d511-33be-481c-93d5-2bacc9cabf22\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wnnsx" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.464247 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a29d511-33be-481c-93d5-2bacc9cabf22-config\") pod \"dnsmasq-dns-78dd6ddcc-wnnsx\" (UID: \"3a29d511-33be-481c-93d5-2bacc9cabf22\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wnnsx" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.495454 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96cs7\" (UniqueName: \"kubernetes.io/projected/3a29d511-33be-481c-93d5-2bacc9cabf22-kube-api-access-96cs7\") pod \"dnsmasq-dns-78dd6ddcc-wnnsx\" (UID: \"3a29d511-33be-481c-93d5-2bacc9cabf22\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wnnsx" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.503775 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rfqg2\" (UniqueName: \"kubernetes.io/projected/1909ee8f-a585-43c9-809f-7cc80c5ec524-kube-api-access-rfqg2\") pod \"dnsmasq-dns-675f4bcbfc-kf527\" (UID: \"1909ee8f-a585-43c9-809f-7cc80c5ec524\") " pod="openstack/dnsmasq-dns-675f4bcbfc-kf527" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.523562 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-kf527" Nov 21 15:53:50 crc kubenswrapper[4967]: I1121 15:53:50.643640 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-wnnsx" Nov 21 15:53:51 crc kubenswrapper[4967]: I1121 15:53:51.064302 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-kf527"] Nov 21 15:53:51 crc kubenswrapper[4967]: I1121 15:53:51.137404 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-kf527" event={"ID":"1909ee8f-a585-43c9-809f-7cc80c5ec524","Type":"ContainerStarted","Data":"654e526e081c24533885e6648db12fed377d0e23916a464a42b8b80a40026322"} Nov 21 15:53:51 crc kubenswrapper[4967]: I1121 15:53:51.161751 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-wnnsx"] Nov 21 15:53:52 crc kubenswrapper[4967]: I1121 15:53:52.156903 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-wnnsx" event={"ID":"3a29d511-33be-481c-93d5-2bacc9cabf22","Type":"ContainerStarted","Data":"87e5c871da2afb15e0ac82813e41266a8f9ca336d441eb65d720bf170183065e"} Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.442922 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-kf527"] Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.475979 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-hwdts"] Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.483424 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-hwdts" Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.487077 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-hwdts"] Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.519275 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7f4b2\" (UniqueName: \"kubernetes.io/projected/946f196e-9c08-497e-9701-7da21de6b10b-kube-api-access-7f4b2\") pod \"dnsmasq-dns-666b6646f7-hwdts\" (UID: \"946f196e-9c08-497e-9701-7da21de6b10b\") " pod="openstack/dnsmasq-dns-666b6646f7-hwdts" Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.519408 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/946f196e-9c08-497e-9701-7da21de6b10b-config\") pod \"dnsmasq-dns-666b6646f7-hwdts\" (UID: \"946f196e-9c08-497e-9701-7da21de6b10b\") " pod="openstack/dnsmasq-dns-666b6646f7-hwdts" Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.519460 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/946f196e-9c08-497e-9701-7da21de6b10b-dns-svc\") pod \"dnsmasq-dns-666b6646f7-hwdts\" (UID: \"946f196e-9c08-497e-9701-7da21de6b10b\") " pod="openstack/dnsmasq-dns-666b6646f7-hwdts" Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.621681 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7f4b2\" (UniqueName: \"kubernetes.io/projected/946f196e-9c08-497e-9701-7da21de6b10b-kube-api-access-7f4b2\") pod \"dnsmasq-dns-666b6646f7-hwdts\" (UID: \"946f196e-9c08-497e-9701-7da21de6b10b\") " pod="openstack/dnsmasq-dns-666b6646f7-hwdts" Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.623887 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/946f196e-9c08-497e-9701-7da21de6b10b-config\") pod 
\"dnsmasq-dns-666b6646f7-hwdts\" (UID: \"946f196e-9c08-497e-9701-7da21de6b10b\") " pod="openstack/dnsmasq-dns-666b6646f7-hwdts" Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.624030 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/946f196e-9c08-497e-9701-7da21de6b10b-dns-svc\") pod \"dnsmasq-dns-666b6646f7-hwdts\" (UID: \"946f196e-9c08-497e-9701-7da21de6b10b\") " pod="openstack/dnsmasq-dns-666b6646f7-hwdts" Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.625224 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/946f196e-9c08-497e-9701-7da21de6b10b-dns-svc\") pod \"dnsmasq-dns-666b6646f7-hwdts\" (UID: \"946f196e-9c08-497e-9701-7da21de6b10b\") " pod="openstack/dnsmasq-dns-666b6646f7-hwdts" Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.625722 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/946f196e-9c08-497e-9701-7da21de6b10b-config\") pod \"dnsmasq-dns-666b6646f7-hwdts\" (UID: \"946f196e-9c08-497e-9701-7da21de6b10b\") " pod="openstack/dnsmasq-dns-666b6646f7-hwdts" Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.660780 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7f4b2\" (UniqueName: \"kubernetes.io/projected/946f196e-9c08-497e-9701-7da21de6b10b-kube-api-access-7f4b2\") pod \"dnsmasq-dns-666b6646f7-hwdts\" (UID: \"946f196e-9c08-497e-9701-7da21de6b10b\") " pod="openstack/dnsmasq-dns-666b6646f7-hwdts" Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.812362 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-wnnsx"] Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.821472 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-hwdts" Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.828970 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-fdphh"] Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.830747 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.857061 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-fdphh"] Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.939908 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95ecc930-ddf3-4ced-a9e8-ade44ada5666-config\") pod \"dnsmasq-dns-57d769cc4f-fdphh\" (UID: \"95ecc930-ddf3-4ced-a9e8-ade44ada5666\") " pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.939979 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/95ecc930-ddf3-4ced-a9e8-ade44ada5666-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-fdphh\" (UID: \"95ecc930-ddf3-4ced-a9e8-ade44ada5666\") " pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" Nov 21 15:53:53 crc kubenswrapper[4967]: I1121 15:53:53.940281 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z784j\" (UniqueName: \"kubernetes.io/projected/95ecc930-ddf3-4ced-a9e8-ade44ada5666-kube-api-access-z784j\") pod \"dnsmasq-dns-57d769cc4f-fdphh\" (UID: \"95ecc930-ddf3-4ced-a9e8-ade44ada5666\") " pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.042364 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/95ecc930-ddf3-4ced-a9e8-ade44ada5666-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-fdphh\" (UID: \"95ecc930-ddf3-4ced-a9e8-ade44ada5666\") " pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.042444 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z784j\" (UniqueName: \"kubernetes.io/projected/95ecc930-ddf3-4ced-a9e8-ade44ada5666-kube-api-access-z784j\") pod \"dnsmasq-dns-57d769cc4f-fdphh\" (UID: \"95ecc930-ddf3-4ced-a9e8-ade44ada5666\") " pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.042532 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95ecc930-ddf3-4ced-a9e8-ade44ada5666-config\") pod \"dnsmasq-dns-57d769cc4f-fdphh\" (UID: \"95ecc930-ddf3-4ced-a9e8-ade44ada5666\") " pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.043475 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95ecc930-ddf3-4ced-a9e8-ade44ada5666-config\") pod \"dnsmasq-dns-57d769cc4f-fdphh\" (UID: \"95ecc930-ddf3-4ced-a9e8-ade44ada5666\") " pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.044325 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/95ecc930-ddf3-4ced-a9e8-ade44ada5666-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-fdphh\" (UID: \"95ecc930-ddf3-4ced-a9e8-ade44ada5666\") " pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.067788 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z784j\" (UniqueName: 
\"kubernetes.io/projected/95ecc930-ddf3-4ced-a9e8-ade44ada5666-kube-api-access-z784j\") pod \"dnsmasq-dns-57d769cc4f-fdphh\" (UID: \"95ecc930-ddf3-4ced-a9e8-ade44ada5666\") " pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.177464 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.408687 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-hwdts"] Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.613857 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.615522 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.622489 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.622685 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-dmgt2" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.622802 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.622860 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.623037 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.623149 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.627191 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.628081 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.654998 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1a455b5d-516b-4e43-a717-f7aa6e326ee8-server-conf\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.655039 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2sqg\" (UniqueName: \"kubernetes.io/projected/1a455b5d-516b-4e43-a717-f7aa6e326ee8-kube-api-access-k2sqg\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.655072 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.655097 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/configmap/1a455b5d-516b-4e43-a717-f7aa6e326ee8-config-data\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.655222 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.655298 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1a455b5d-516b-4e43-a717-f7aa6e326ee8-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.655472 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.655557 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.655601 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1a455b5d-516b-4e43-a717-f7aa6e326ee8-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.655635 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1a455b5d-516b-4e43-a717-f7aa6e326ee8-pod-info\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.655661 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.756597 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1a455b5d-516b-4e43-a717-f7aa6e326ee8-server-conf\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.756879 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2sqg\" (UniqueName: \"kubernetes.io/projected/1a455b5d-516b-4e43-a717-f7aa6e326ee8-kube-api-access-k2sqg\") pod 
\"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.756913 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.756942 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1a455b5d-516b-4e43-a717-f7aa6e326ee8-config-data\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.756976 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.756999 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1a455b5d-516b-4e43-a717-f7aa6e326ee8-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.757044 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.757079 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.757101 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1a455b5d-516b-4e43-a717-f7aa6e326ee8-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.757124 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1a455b5d-516b-4e43-a717-f7aa6e326ee8-pod-info\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.757142 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.758043 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" 
(UniqueName: \"kubernetes.io/empty-dir/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.758348 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1a455b5d-516b-4e43-a717-f7aa6e326ee8-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.758725 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.758986 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1a455b5d-516b-4e43-a717-f7aa6e326ee8-config-data\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.759180 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.760942 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1a455b5d-516b-4e43-a717-f7aa6e326ee8-server-conf\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.764979 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.764992 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1a455b5d-516b-4e43-a717-f7aa6e326ee8-pod-info\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.764983 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.765590 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1a455b5d-516b-4e43-a717-f7aa6e326ee8-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.774357 4967 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-k2sqg\" (UniqueName: \"kubernetes.io/projected/1a455b5d-516b-4e43-a717-f7aa6e326ee8-kube-api-access-k2sqg\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.792651 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.953396 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.953476 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.960553 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.965852 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.965946 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.966025 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-t9gdg" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.966035 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.965852 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.966153 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.966194 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 21 15:53:54 crc kubenswrapper[4967]: I1121 15:53:54.972767 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.061880 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d96c12a3-6ce4-40f6-a655-0881d711f9fa-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.061934 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.061965 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: 
\"kubernetes.io/projected/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.062008 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d96c12a3-6ce4-40f6-a655-0881d711f9fa-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.062039 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d96c12a3-6ce4-40f6-a655-0881d711f9fa-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.062065 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.062088 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.062169 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.062192 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d96c12a3-6ce4-40f6-a655-0881d711f9fa-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.062404 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2q9d\" (UniqueName: \"kubernetes.io/projected/d96c12a3-6ce4-40f6-a655-0881d711f9fa-kube-api-access-f2q9d\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.062425 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d96c12a3-6ce4-40f6-a655-0881d711f9fa-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.163683 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/d96c12a3-6ce4-40f6-a655-0881d711f9fa-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.164022 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d96c12a3-6ce4-40f6-a655-0881d711f9fa-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.164053 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.164071 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.164131 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.164146 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d96c12a3-6ce4-40f6-a655-0881d711f9fa-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.164193 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2q9d\" (UniqueName: \"kubernetes.io/projected/d96c12a3-6ce4-40f6-a655-0881d711f9fa-kube-api-access-f2q9d\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.164210 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d96c12a3-6ce4-40f6-a655-0881d711f9fa-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.164233 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d96c12a3-6ce4-40f6-a655-0881d711f9fa-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.164251 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.164266 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.166003 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d96c12a3-6ce4-40f6-a655-0881d711f9fa-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.166049 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d96c12a3-6ce4-40f6-a655-0881d711f9fa-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.166508 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.166731 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.166978 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.171805 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d96c12a3-6ce4-40f6-a655-0881d711f9fa-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.172014 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d96c12a3-6ce4-40f6-a655-0881d711f9fa-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.172341 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.173334 4967 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d96c12a3-6ce4-40f6-a655-0881d711f9fa-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.174012 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.178045 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-fdphh"] Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.194463 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2q9d\" (UniqueName: \"kubernetes.io/projected/d96c12a3-6ce4-40f6-a655-0881d711f9fa-kube-api-access-f2q9d\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.210949 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" event={"ID":"95ecc930-ddf3-4ced-a9e8-ade44ada5666","Type":"ContainerStarted","Data":"363e36075dd5f107a287414417fe17a2425392f49eb14ab339ae18450a510852"} Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.212850 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-hwdts" event={"ID":"946f196e-9c08-497e-9701-7da21de6b10b","Type":"ContainerStarted","Data":"86f7dbc008dfc227dd239a2363578fa11c04e4efe4e4e8d342f25f3236bd930f"} Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.213447 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.334392 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.609360 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 15:53:55 crc kubenswrapper[4967]: W1121 15:53:55.614701 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1a455b5d_516b_4e43_a717_f7aa6e326ee8.slice/crio-f6c7bbe292a88d7bc98bdc9745c09ebc4c2ecb4fa1043681c797a36ea49eac60 WatchSource:0}: Error finding container f6c7bbe292a88d7bc98bdc9745c09ebc4c2ecb4fa1043681c797a36ea49eac60: Status 404 returned error can't find the container with id f6c7bbe292a88d7bc98bdc9745c09ebc4c2ecb4fa1043681c797a36ea49eac60 Nov 21 15:53:55 crc kubenswrapper[4967]: I1121 15:53:55.793872 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 15:53:55 crc kubenswrapper[4967]: W1121 15:53:55.796191 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd96c12a3_6ce4_40f6_a655_0881d711f9fa.slice/crio-f08cacbda5650e3b23ba50b3bee4dbb84155050ad4c902ab7fc6bccb01daeece WatchSource:0}: Error finding container f08cacbda5650e3b23ba50b3bee4dbb84155050ad4c902ab7fc6bccb01daeece: Status 404 returned error can't find the container with id f08cacbda5650e3b23ba50b3bee4dbb84155050ad4c902ab7fc6bccb01daeece Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.152562 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.154244 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.156632 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-nzbmc" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.157569 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.159479 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.159787 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.171019 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.174096 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.222800 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"1a455b5d-516b-4e43-a717-f7aa6e326ee8","Type":"ContainerStarted","Data":"f6c7bbe292a88d7bc98bdc9745c09ebc4c2ecb4fa1043681c797a36ea49eac60"} Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.224137 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"d96c12a3-6ce4-40f6-a655-0881d711f9fa","Type":"ContainerStarted","Data":"f08cacbda5650e3b23ba50b3bee4dbb84155050ad4c902ab7fc6bccb01daeece"} Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.286920 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.286967 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.287005 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bq6kw\" (UniqueName: \"kubernetes.io/projected/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-kube-api-access-bq6kw\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.287095 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.287272 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.287406 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-config-data-default\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.287505 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-kolla-config\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.287624 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.389409 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.389707 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.389798 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.389890 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bq6kw\" (UniqueName: \"kubernetes.io/projected/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-kube-api-access-bq6kw\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.389991 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.390098 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.390200 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-config-data-default\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.390298 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-kolla-config\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.390403 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.390334 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.391101 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-kolla-config\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 
crc kubenswrapper[4967]: I1121 15:53:56.391353 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-config-data-default\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.395527 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.395532 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.396354 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.408877 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bq6kw\" (UniqueName: \"kubernetes.io/projected/1d7a085e-ccb2-4791-9bdb-e3c564e8b450-kube-api-access-bq6kw\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.425597 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"1d7a085e-ccb2-4791-9bdb-e3c564e8b450\") " pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.475901 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 21 15:53:56 crc kubenswrapper[4967]: I1121 15:53:56.926404 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.234653 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1d7a085e-ccb2-4791-9bdb-e3c564e8b450","Type":"ContainerStarted","Data":"7f72420983d2942d23daa54ab1e8da46a183cd5dd3eaecfe4bc714db45e642e8"} Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.550034 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.552215 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.556450 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.556611 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-k6tbt"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.556772 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.556889 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.561423 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.724265 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b3d39ab9-f219-4af5-b82c-102fefaff9bc-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.724331 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b3d39ab9-f219-4af5-b82c-102fefaff9bc-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.724393 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3d39ab9-f219-4af5-b82c-102fefaff9bc-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.724452 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b3d39ab9-f219-4af5-b82c-102fefaff9bc-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.724484 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3d39ab9-f219-4af5-b82c-102fefaff9bc-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.724535 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.724613 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwhhb\" (UniqueName: \"kubernetes.io/projected/b3d39ab9-f219-4af5-b82c-102fefaff9bc-kube-api-access-qwhhb\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.724640 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b3d39ab9-f219-4af5-b82c-102fefaff9bc-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.826761 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwhhb\" (UniqueName: \"kubernetes.io/projected/b3d39ab9-f219-4af5-b82c-102fefaff9bc-kube-api-access-qwhhb\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.826807 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b3d39ab9-f219-4af5-b82c-102fefaff9bc-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.826834 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b3d39ab9-f219-4af5-b82c-102fefaff9bc-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.826854 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b3d39ab9-f219-4af5-b82c-102fefaff9bc-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.826897 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3d39ab9-f219-4af5-b82c-102fefaff9bc-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.826954 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b3d39ab9-f219-4af5-b82c-102fefaff9bc-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.826992 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3d39ab9-f219-4af5-b82c-102fefaff9bc-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.827029 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.827388 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.828897 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b3d39ab9-f219-4af5-b82c-102fefaff9bc-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.829391 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b3d39ab9-f219-4af5-b82c-102fefaff9bc-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.830784 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b3d39ab9-f219-4af5-b82c-102fefaff9bc-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.834832 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3d39ab9-f219-4af5-b82c-102fefaff9bc-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.843071 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3d39ab9-f219-4af5-b82c-102fefaff9bc-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.853322 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.857447 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b3d39ab9-f219-4af5-b82c-102fefaff9bc-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.868293 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwhhb\" (UniqueName: \"kubernetes.io/projected/b3d39ab9-f219-4af5-b82c-102fefaff9bc-kube-api-access-qwhhb\") pod \"openstack-cell1-galera-0\" (UID: \"b3d39ab9-f219-4af5-b82c-102fefaff9bc\") " pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.878427 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.946000 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"]
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.947669 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.952671 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.952823 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.953412 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-htqjc"
Nov 21 15:53:57 crc kubenswrapper[4967]: I1121 15:53:57.959011 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Nov 21 15:53:58 crc kubenswrapper[4967]: I1121 15:53:58.031975 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2af9b421-461a-4411-8a7d-9a0bf5fa8d28-config-data\") pod \"memcached-0\" (UID: \"2af9b421-461a-4411-8a7d-9a0bf5fa8d28\") " pod="openstack/memcached-0"
Nov 21 15:53:58 crc kubenswrapper[4967]: I1121 15:53:58.032092 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/2af9b421-461a-4411-8a7d-9a0bf5fa8d28-memcached-tls-certs\") pod \"memcached-0\" (UID: \"2af9b421-461a-4411-8a7d-9a0bf5fa8d28\") " pod="openstack/memcached-0"
Nov 21 15:53:58 crc kubenswrapper[4967]: I1121 15:53:58.032165 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2af9b421-461a-4411-8a7d-9a0bf5fa8d28-kolla-config\") pod \"memcached-0\" (UID: \"2af9b421-461a-4411-8a7d-9a0bf5fa8d28\") " pod="openstack/memcached-0"
Nov 21 15:53:58 crc kubenswrapper[4967]: I1121 15:53:58.032265 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmrt7\" (UniqueName: \"kubernetes.io/projected/2af9b421-461a-4411-8a7d-9a0bf5fa8d28-kube-api-access-hmrt7\") pod \"memcached-0\" (UID: \"2af9b421-461a-4411-8a7d-9a0bf5fa8d28\") " pod="openstack/memcached-0"
Nov 21 15:53:58 crc kubenswrapper[4967]: I1121 15:53:58.032607 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2af9b421-461a-4411-8a7d-9a0bf5fa8d28-combined-ca-bundle\") pod \"memcached-0\" (UID: \"2af9b421-461a-4411-8a7d-9a0bf5fa8d28\") " pod="openstack/memcached-0"
Nov 21 15:53:58 crc kubenswrapper[4967]: I1121 15:53:58.134734 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2af9b421-461a-4411-8a7d-9a0bf5fa8d28-config-data\") pod \"memcached-0\" (UID: \"2af9b421-461a-4411-8a7d-9a0bf5fa8d28\") " pod="openstack/memcached-0"
Nov 21 15:53:58 crc kubenswrapper[4967]: I1121 15:53:58.134855 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/2af9b421-461a-4411-8a7d-9a0bf5fa8d28-memcached-tls-certs\") pod \"memcached-0\" (UID: \"2af9b421-461a-4411-8a7d-9a0bf5fa8d28\") " pod="openstack/memcached-0"
started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/2af9b421-461a-4411-8a7d-9a0bf5fa8d28-memcached-tls-certs\") pod \"memcached-0\" (UID: \"2af9b421-461a-4411-8a7d-9a0bf5fa8d28\") " pod="openstack/memcached-0" Nov 21 15:53:58 crc kubenswrapper[4967]: I1121 15:53:58.134928 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2af9b421-461a-4411-8a7d-9a0bf5fa8d28-kolla-config\") pod \"memcached-0\" (UID: \"2af9b421-461a-4411-8a7d-9a0bf5fa8d28\") " pod="openstack/memcached-0" Nov 21 15:53:58 crc kubenswrapper[4967]: I1121 15:53:58.134990 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmrt7\" (UniqueName: \"kubernetes.io/projected/2af9b421-461a-4411-8a7d-9a0bf5fa8d28-kube-api-access-hmrt7\") pod \"memcached-0\" (UID: \"2af9b421-461a-4411-8a7d-9a0bf5fa8d28\") " pod="openstack/memcached-0" Nov 21 15:53:58 crc kubenswrapper[4967]: I1121 15:53:58.135102 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2af9b421-461a-4411-8a7d-9a0bf5fa8d28-combined-ca-bundle\") pod \"memcached-0\" (UID: \"2af9b421-461a-4411-8a7d-9a0bf5fa8d28\") " pod="openstack/memcached-0" Nov 21 15:53:58 crc kubenswrapper[4967]: I1121 15:53:58.135934 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2af9b421-461a-4411-8a7d-9a0bf5fa8d28-config-data\") pod \"memcached-0\" (UID: \"2af9b421-461a-4411-8a7d-9a0bf5fa8d28\") " pod="openstack/memcached-0" Nov 21 15:53:58 crc kubenswrapper[4967]: I1121 15:53:58.136071 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2af9b421-461a-4411-8a7d-9a0bf5fa8d28-kolla-config\") pod \"memcached-0\" (UID: \"2af9b421-461a-4411-8a7d-9a0bf5fa8d28\") " pod="openstack/memcached-0" Nov 21 15:53:58 crc kubenswrapper[4967]: I1121 15:53:58.140948 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2af9b421-461a-4411-8a7d-9a0bf5fa8d28-combined-ca-bundle\") pod \"memcached-0\" (UID: \"2af9b421-461a-4411-8a7d-9a0bf5fa8d28\") " pod="openstack/memcached-0" Nov 21 15:53:58 crc kubenswrapper[4967]: I1121 15:53:58.155110 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/2af9b421-461a-4411-8a7d-9a0bf5fa8d28-memcached-tls-certs\") pod \"memcached-0\" (UID: \"2af9b421-461a-4411-8a7d-9a0bf5fa8d28\") " pod="openstack/memcached-0" Nov 21 15:53:58 crc kubenswrapper[4967]: I1121 15:53:58.164865 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmrt7\" (UniqueName: \"kubernetes.io/projected/2af9b421-461a-4411-8a7d-9a0bf5fa8d28-kube-api-access-hmrt7\") pod \"memcached-0\" (UID: \"2af9b421-461a-4411-8a7d-9a0bf5fa8d28\") " pod="openstack/memcached-0" Nov 21 15:53:58 crc kubenswrapper[4967]: I1121 15:53:58.277420 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 21 15:54:00 crc kubenswrapper[4967]: I1121 15:54:00.615303 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 15:54:00 crc kubenswrapper[4967]: I1121 15:54:00.616851 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 21 15:54:00 crc kubenswrapper[4967]: W1121 15:54:00.622190 4967 reflector.go:561] object-"openstack"/"telemetry-ceilometer-dockercfg-9jl96": failed to list *v1.Secret: secrets "telemetry-ceilometer-dockercfg-9jl96" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Nov 21 15:54:00 crc kubenswrapper[4967]: E1121 15:54:00.622234 4967 reflector.go:158] "Unhandled Error" err="object-\"openstack\"/\"telemetry-ceilometer-dockercfg-9jl96\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"telemetry-ceilometer-dockercfg-9jl96\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openstack\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 21 15:54:00 crc kubenswrapper[4967]: I1121 15:54:00.638404 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 15:54:00 crc kubenswrapper[4967]: I1121 15:54:00.684953 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhgx5\" (UniqueName: \"kubernetes.io/projected/52658e97-d6ec-4a6d-ac6b-a5168a5ab42f-kube-api-access-hhgx5\") pod \"kube-state-metrics-0\" (UID: \"52658e97-d6ec-4a6d-ac6b-a5168a5ab42f\") " pod="openstack/kube-state-metrics-0" Nov 21 15:54:00 crc kubenswrapper[4967]: I1121 15:54:00.788796 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhgx5\" (UniqueName: \"kubernetes.io/projected/52658e97-d6ec-4a6d-ac6b-a5168a5ab42f-kube-api-access-hhgx5\") pod \"kube-state-metrics-0\" (UID: \"52658e97-d6ec-4a6d-ac6b-a5168a5ab42f\") " pod="openstack/kube-state-metrics-0" Nov 21 15:54:00 crc kubenswrapper[4967]: I1121 15:54:00.809434 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhgx5\" (UniqueName: \"kubernetes.io/projected/52658e97-d6ec-4a6d-ac6b-a5168a5ab42f-kube-api-access-hhgx5\") pod \"kube-state-metrics-0\" (UID: \"52658e97-d6ec-4a6d-ac6b-a5168a5ab42f\") " pod="openstack/kube-state-metrics-0" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.221692 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-b7zgw"] Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.224606 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-b7zgw" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.227731 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards-sa-dockercfg-rfwbm" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.227950 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.242448 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-b7zgw"] Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.306986 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghxbg\" (UniqueName: \"kubernetes.io/projected/55a13dc8-8cec-4642-9c0b-3c6799d942fc-kube-api-access-ghxbg\") pod \"observability-ui-dashboards-7d5fb4cbfb-b7zgw\" (UID: \"55a13dc8-8cec-4642-9c0b-3c6799d942fc\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-b7zgw" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.307485 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/55a13dc8-8cec-4642-9c0b-3c6799d942fc-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-b7zgw\" (UID: \"55a13dc8-8cec-4642-9c0b-3c6799d942fc\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-b7zgw" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.409181 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/55a13dc8-8cec-4642-9c0b-3c6799d942fc-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-b7zgw\" (UID: \"55a13dc8-8cec-4642-9c0b-3c6799d942fc\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-b7zgw" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.409266 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghxbg\" (UniqueName: \"kubernetes.io/projected/55a13dc8-8cec-4642-9c0b-3c6799d942fc-kube-api-access-ghxbg\") pod \"observability-ui-dashboards-7d5fb4cbfb-b7zgw\" (UID: \"55a13dc8-8cec-4642-9c0b-3c6799d942fc\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-b7zgw" Nov 21 15:54:01 crc kubenswrapper[4967]: E1121 15:54:01.409550 4967 secret.go:188] Couldn't get secret openshift-operators/observability-ui-dashboards: secret "observability-ui-dashboards" not found Nov 21 15:54:01 crc kubenswrapper[4967]: E1121 15:54:01.409647 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/55a13dc8-8cec-4642-9c0b-3c6799d942fc-serving-cert podName:55a13dc8-8cec-4642-9c0b-3c6799d942fc nodeName:}" failed. No retries permitted until 2025-11-21 15:54:01.909620278 +0000 UTC m=+1130.168141346 (durationBeforeRetry 500ms). 
Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.441282 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghxbg\" (UniqueName: \"kubernetes.io/projected/55a13dc8-8cec-4642-9c0b-3c6799d942fc-kube-api-access-ghxbg\") pod \"observability-ui-dashboards-7d5fb4cbfb-b7zgw\" (UID: \"55a13dc8-8cec-4642-9c0b-3c6799d942fc\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-b7zgw"
Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.601734 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-9dd55876b-jvsbc"]
Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.603327 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-9dd55876b-jvsbc"
Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.638439 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-9dd55876b-jvsbc"]
Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.714488 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6a507087-6dd3-47ba-863f-17f2b8e9503a-console-serving-cert\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc"
Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.714856 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/6a507087-6dd3-47ba-863f-17f2b8e9503a-console-config\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc"
Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.714918 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/6a507087-6dd3-47ba-863f-17f2b8e9503a-oauth-serving-cert\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc"
Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.715030 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p745g\" (UniqueName: \"kubernetes.io/projected/6a507087-6dd3-47ba-863f-17f2b8e9503a-kube-api-access-p745g\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc"
Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.715088 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6a507087-6dd3-47ba-863f-17f2b8e9503a-trusted-ca-bundle\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc"
Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.715107 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6a507087-6dd3-47ba-863f-17f2b8e9503a-service-ca\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc"
\"kubernetes.io/configmap/6a507087-6dd3-47ba-863f-17f2b8e9503a-service-ca\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.715126 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6a507087-6dd3-47ba-863f-17f2b8e9503a-console-oauth-config\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.738861 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-9jl96" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.747633 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.817334 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p745g\" (UniqueName: \"kubernetes.io/projected/6a507087-6dd3-47ba-863f-17f2b8e9503a-kube-api-access-p745g\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.817466 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6a507087-6dd3-47ba-863f-17f2b8e9503a-trusted-ca-bundle\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.817495 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6a507087-6dd3-47ba-863f-17f2b8e9503a-service-ca\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.817524 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6a507087-6dd3-47ba-863f-17f2b8e9503a-console-oauth-config\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.817606 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6a507087-6dd3-47ba-863f-17f2b8e9503a-console-serving-cert\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.817647 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/6a507087-6dd3-47ba-863f-17f2b8e9503a-console-config\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.817700 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/6a507087-6dd3-47ba-863f-17f2b8e9503a-oauth-serving-cert\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.819496 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6a507087-6dd3-47ba-863f-17f2b8e9503a-service-ca\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.823699 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6a507087-6dd3-47ba-863f-17f2b8e9503a-console-oauth-config\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.824982 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6a507087-6dd3-47ba-863f-17f2b8e9503a-console-serving-cert\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.835041 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p745g\" (UniqueName: \"kubernetes.io/projected/6a507087-6dd3-47ba-863f-17f2b8e9503a-kube-api-access-p745g\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.843884 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6a507087-6dd3-47ba-863f-17f2b8e9503a-trusted-ca-bundle\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.844425 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/6a507087-6dd3-47ba-863f-17f2b8e9503a-oauth-serving-cert\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.846001 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/6a507087-6dd3-47ba-863f-17f2b8e9503a-console-config\") pod \"console-9dd55876b-jvsbc\" (UID: \"6a507087-6dd3-47ba-863f-17f2b8e9503a\") " pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.863854 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.892730 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.899224 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.899451 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-lsqp4" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.899692 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.900325 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.909154 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.913589 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.919634 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/55a13dc8-8cec-4642-9c0b-3c6799d942fc-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-b7zgw\" (UID: \"55a13dc8-8cec-4642-9c0b-3c6799d942fc\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-b7zgw" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.934387 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.967021 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/55a13dc8-8cec-4642-9c0b-3c6799d942fc-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-b7zgw\" (UID: \"55a13dc8-8cec-4642-9c0b-3c6799d942fc\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-b7zgw" Nov 21 15:54:01 crc kubenswrapper[4967]: I1121 15:54:01.967148 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.021180 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.021227 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.021245 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.021328 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.021365 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.021423 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.021453 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-config\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.021487 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9zs6\" (UniqueName: \"kubernetes.io/projected/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-kube-api-access-t9zs6\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.123535 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-t9zs6\" (UniqueName: \"kubernetes.io/projected/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-kube-api-access-t9zs6\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.123673 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.123703 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.123731 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.123828 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.123872 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.123938 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.123985 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-config\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.125065 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.126664 4967 
csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.127163 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/bfa69cc79b34a22cf414a992ebcad53d044f622cf1f4723e377bf86e9c5e1255/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.127170 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.127206 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.127755 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-config\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.129504 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.143564 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.143722 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9zs6\" (UniqueName: \"kubernetes.io/projected/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-kube-api-access-t9zs6\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.157256 4967 util.go:30] "No sandbox for pod can be found. 
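The csi_attacher.go line above shows why the prometheus PVC skips a real device stage: the kubevirt.io.hostpath-provisioner driver does not advertise the STAGE_UNSTAGE_VOLUME node capability, so the kubelet records MountDevice as trivially succeeded without calling NodeStageVolume and proceeds straight to NodePublishVolume during SetUp. A hedged sketch of querying a CSI driver for that capability over its node socket; the socket path is an assumption.

```go
// Sketch: ask a CSI driver whether it implements STAGE_UNSTAGE_VOLUME,
// the capability whose absence produces "Skipping MountDevice..." above.
// The unix socket path below is a placeholder, not the driver's actual path.
package main

import (
	"context"
	"fmt"
	"log"

	csi "github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("unix:///var/lib/kubelet/plugins/csi-hostpath/csi.sock", // assumed path
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	node := csi.NewNodeClient(conn)
	resp, err := node.NodeGetCapabilities(context.TODO(), &csi.NodeGetCapabilitiesRequest{})
	if err != nil {
		log.Fatal(err)
	}
	staged := false
	for _, c := range resp.GetCapabilities() {
		if c.GetRpc().GetType() == csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
			staged = true
		}
	}
	// When false, the kubelet skips NodeStageVolume for this driver's volumes.
	fmt.Println("STAGE_UNSTAGE_VOLUME advertised:", staged)
}
```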
Need to start a new one" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-b7zgw" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.166853 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\") pod \"prometheus-metric-storage-0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:02 crc kubenswrapper[4967]: I1121 15:54:02.291115 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.493516 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.495984 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.498088 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.498454 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.504701 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.504941 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-pwnq4" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.505100 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.515751 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.560835 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0cdc464-b000-47e5-a8d8-0a881ba447c1-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.561240 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0cdc464-b000-47e5-a8d8-0a881ba447c1-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.561376 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0cdc464-b000-47e5-a8d8-0a881ba447c1-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.561564 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " 
pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.561674 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b0cdc464-b000-47e5-a8d8-0a881ba447c1-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.561802 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0cdc464-b000-47e5-a8d8-0a881ba447c1-config\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.561906 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ml4t\" (UniqueName: \"kubernetes.io/projected/b0cdc464-b000-47e5-a8d8-0a881ba447c1-kube-api-access-7ml4t\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.562040 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b0cdc464-b000-47e5-a8d8-0a881ba447c1-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.663614 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0cdc464-b000-47e5-a8d8-0a881ba447c1-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.663683 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0cdc464-b000-47e5-a8d8-0a881ba447c1-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.663727 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.663760 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b0cdc464-b000-47e5-a8d8-0a881ba447c1-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.663806 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0cdc464-b000-47e5-a8d8-0a881ba447c1-config\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.663830 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ml4t\" (UniqueName: 
\"kubernetes.io/projected/b0cdc464-b000-47e5-a8d8-0a881ba447c1-kube-api-access-7ml4t\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.663881 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b0cdc464-b000-47e5-a8d8-0a881ba447c1-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.663946 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0cdc464-b000-47e5-a8d8-0a881ba447c1-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.672624 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b0cdc464-b000-47e5-a8d8-0a881ba447c1-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.672985 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.674035 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b0cdc464-b000-47e5-a8d8-0a881ba447c1-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.674196 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0cdc464-b000-47e5-a8d8-0a881ba447c1-config\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.675882 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0cdc464-b000-47e5-a8d8-0a881ba447c1-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.677130 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0cdc464-b000-47e5-a8d8-0a881ba447c1-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.679488 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0cdc464-b000-47e5-a8d8-0a881ba447c1-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.720147 4967 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-7ml4t\" (UniqueName: \"kubernetes.io/projected/b0cdc464-b000-47e5-a8d8-0a881ba447c1-kube-api-access-7ml4t\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.745571 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"b0cdc464-b000-47e5-a8d8-0a881ba447c1\") " pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.869827 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.968809 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-5c2pr"] Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.970519 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5c2pr" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.973716 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.974157 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-pgjd2" Nov 21 15:54:03 crc kubenswrapper[4967]: I1121 15:54:03.974550 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.011066 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-n8r27"] Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.014290 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-n8r27" Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.038065 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5c2pr"] Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.053019 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-n8r27"] Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.073520 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/e04788f9-f223-46ef-b96b-24e05c5d911f-ovn-controller-tls-certs\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr" Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.073609 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e04788f9-f223-46ef-b96b-24e05c5d911f-var-run-ovn\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr" Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.073686 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e04788f9-f223-46ef-b96b-24e05c5d911f-var-log-ovn\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr" Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.073745 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lw7zr\" (UniqueName: \"kubernetes.io/projected/e04788f9-f223-46ef-b96b-24e05c5d911f-kube-api-access-lw7zr\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr" Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.073780 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e04788f9-f223-46ef-b96b-24e05c5d911f-var-run\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr" Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.073876 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e04788f9-f223-46ef-b96b-24e05c5d911f-scripts\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr" Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.073926 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e04788f9-f223-46ef-b96b-24e05c5d911f-combined-ca-bundle\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr" Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.175487 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/83956a70-e80b-424b-9396-8febf34b60ed-var-run\") pod \"ovn-controller-ovs-n8r27\" (UID: \"83956a70-e80b-424b-9396-8febf34b60ed\") " pod="openstack/ovn-controller-ovs-n8r27" Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.175571 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/83956a70-e80b-424b-9396-8febf34b60ed-scripts\") pod \"ovn-controller-ovs-n8r27\" (UID: \"83956a70-e80b-424b-9396-8febf34b60ed\") " pod="openstack/ovn-controller-ovs-n8r27"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.175608 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e04788f9-f223-46ef-b96b-24e05c5d911f-combined-ca-bundle\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.175642 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/e04788f9-f223-46ef-b96b-24e05c5d911f-ovn-controller-tls-certs\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.175665 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zv8bc\" (UniqueName: \"kubernetes.io/projected/83956a70-e80b-424b-9396-8febf34b60ed-kube-api-access-zv8bc\") pod \"ovn-controller-ovs-n8r27\" (UID: \"83956a70-e80b-424b-9396-8febf34b60ed\") " pod="openstack/ovn-controller-ovs-n8r27"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.175700 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e04788f9-f223-46ef-b96b-24e05c5d911f-var-run-ovn\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.175735 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e04788f9-f223-46ef-b96b-24e05c5d911f-var-log-ovn\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.175764 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/83956a70-e80b-424b-9396-8febf34b60ed-var-log\") pod \"ovn-controller-ovs-n8r27\" (UID: \"83956a70-e80b-424b-9396-8febf34b60ed\") " pod="openstack/ovn-controller-ovs-n8r27"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.175782 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lw7zr\" (UniqueName: \"kubernetes.io/projected/e04788f9-f223-46ef-b96b-24e05c5d911f-kube-api-access-lw7zr\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.175803 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e04788f9-f223-46ef-b96b-24e05c5d911f-var-run\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.175843 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/83956a70-e80b-424b-9396-8febf34b60ed-etc-ovs\") pod \"ovn-controller-ovs-n8r27\" (UID: \"83956a70-e80b-424b-9396-8febf34b60ed\") " pod="openstack/ovn-controller-ovs-n8r27"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.175881 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/83956a70-e80b-424b-9396-8febf34b60ed-var-lib\") pod \"ovn-controller-ovs-n8r27\" (UID: \"83956a70-e80b-424b-9396-8febf34b60ed\") " pod="openstack/ovn-controller-ovs-n8r27"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.177000 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e04788f9-f223-46ef-b96b-24e05c5d911f-var-run-ovn\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.180790 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e04788f9-f223-46ef-b96b-24e05c5d911f-scripts\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.181250 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e04788f9-f223-46ef-b96b-24e05c5d911f-var-log-ovn\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.181621 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e04788f9-f223-46ef-b96b-24e05c5d911f-var-run\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.184183 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/e04788f9-f223-46ef-b96b-24e05c5d911f-ovn-controller-tls-certs\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.198680 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e04788f9-f223-46ef-b96b-24e05c5d911f-combined-ca-bundle\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.201229 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lw7zr\" (UniqueName: \"kubernetes.io/projected/e04788f9-f223-46ef-b96b-24e05c5d911f-kube-api-access-lw7zr\") pod \"ovn-controller-5c2pr\" (UID: \"e04788f9-f223-46ef-b96b-24e05c5d911f\") " pod="openstack/ovn-controller-5c2pr"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.278097 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/83956a70-e80b-424b-9396-8febf34b60ed-var-log\") pod \"ovn-controller-ovs-n8r27\" (UID: \"83956a70-e80b-424b-9396-8febf34b60ed\") " pod="openstack/ovn-controller-ovs-n8r27"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.278171 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/83956a70-e80b-424b-9396-8febf34b60ed-etc-ovs\") pod \"ovn-controller-ovs-n8r27\" (UID: \"83956a70-e80b-424b-9396-8febf34b60ed\") " pod="openstack/ovn-controller-ovs-n8r27"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.278205 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/83956a70-e80b-424b-9396-8febf34b60ed-var-lib\") pod \"ovn-controller-ovs-n8r27\" (UID: \"83956a70-e80b-424b-9396-8febf34b60ed\") " pod="openstack/ovn-controller-ovs-n8r27"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.278236 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/83956a70-e80b-424b-9396-8febf34b60ed-var-run\") pod \"ovn-controller-ovs-n8r27\" (UID: \"83956a70-e80b-424b-9396-8febf34b60ed\") " pod="openstack/ovn-controller-ovs-n8r27"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.278287 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/83956a70-e80b-424b-9396-8febf34b60ed-scripts\") pod \"ovn-controller-ovs-n8r27\" (UID: \"83956a70-e80b-424b-9396-8febf34b60ed\") " pod="openstack/ovn-controller-ovs-n8r27"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.278361 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zv8bc\" (UniqueName: \"kubernetes.io/projected/83956a70-e80b-424b-9396-8febf34b60ed-kube-api-access-zv8bc\") pod \"ovn-controller-ovs-n8r27\" (UID: \"83956a70-e80b-424b-9396-8febf34b60ed\") " pod="openstack/ovn-controller-ovs-n8r27"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.279673 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/83956a70-e80b-424b-9396-8febf34b60ed-var-log\") pod \"ovn-controller-ovs-n8r27\" (UID: \"83956a70-e80b-424b-9396-8febf34b60ed\") " pod="openstack/ovn-controller-ovs-n8r27"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.279826 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/83956a70-e80b-424b-9396-8febf34b60ed-etc-ovs\") pod \"ovn-controller-ovs-n8r27\" (UID: \"83956a70-e80b-424b-9396-8febf34b60ed\") " pod="openstack/ovn-controller-ovs-n8r27"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.279933 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/83956a70-e80b-424b-9396-8febf34b60ed-var-lib\") pod \"ovn-controller-ovs-n8r27\" (UID: \"83956a70-e80b-424b-9396-8febf34b60ed\") " pod="openstack/ovn-controller-ovs-n8r27"
Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.279981 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/83956a70-e80b-424b-9396-8febf34b60ed-var-run\") pod \"ovn-controller-ovs-n8r27\"
(UID: \"83956a70-e80b-424b-9396-8febf34b60ed\") " pod="openstack/ovn-controller-ovs-n8r27" Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.286356 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/83956a70-e80b-424b-9396-8febf34b60ed-scripts\") pod \"ovn-controller-ovs-n8r27\" (UID: \"83956a70-e80b-424b-9396-8febf34b60ed\") " pod="openstack/ovn-controller-ovs-n8r27" Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.296747 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zv8bc\" (UniqueName: \"kubernetes.io/projected/83956a70-e80b-424b-9396-8febf34b60ed-kube-api-access-zv8bc\") pod \"ovn-controller-ovs-n8r27\" (UID: \"83956a70-e80b-424b-9396-8febf34b60ed\") " pod="openstack/ovn-controller-ovs-n8r27" Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.301498 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5c2pr" Nov 21 15:54:04 crc kubenswrapper[4967]: I1121 15:54:04.337762 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-n8r27" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.228727 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.231015 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.233186 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.233877 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-ngdmt" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.234089 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.234981 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.247301 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.343893 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1af05a7c-7c8a-42fa-a520-047cc273227b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.343953 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1af05a7c-7c8a-42fa-a520-047cc273227b-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.343978 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbg48\" (UniqueName: \"kubernetes.io/projected/1af05a7c-7c8a-42fa-a520-047cc273227b-kube-api-access-hbg48\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc 
kubenswrapper[4967]: I1121 15:54:07.344111 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1af05a7c-7c8a-42fa-a520-047cc273227b-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.344306 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1af05a7c-7c8a-42fa-a520-047cc273227b-config\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.344526 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1af05a7c-7c8a-42fa-a520-047cc273227b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.344550 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1af05a7c-7c8a-42fa-a520-047cc273227b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.344727 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.446560 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1af05a7c-7c8a-42fa-a520-047cc273227b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.446616 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1af05a7c-7c8a-42fa-a520-047cc273227b-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.446639 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbg48\" (UniqueName: \"kubernetes.io/projected/1af05a7c-7c8a-42fa-a520-047cc273227b-kube-api-access-hbg48\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.446676 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1af05a7c-7c8a-42fa-a520-047cc273227b-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.446726 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/1af05a7c-7c8a-42fa-a520-047cc273227b-config\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.446803 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1af05a7c-7c8a-42fa-a520-047cc273227b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.446826 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1af05a7c-7c8a-42fa-a520-047cc273227b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.446884 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.447737 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1af05a7c-7c8a-42fa-a520-047cc273227b-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.447856 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.448480 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1af05a7c-7c8a-42fa-a520-047cc273227b-config\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.448696 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1af05a7c-7c8a-42fa-a520-047cc273227b-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.451853 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1af05a7c-7c8a-42fa-a520-047cc273227b-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.452197 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1af05a7c-7c8a-42fa-a520-047cc273227b-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.453440 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1af05a7c-7c8a-42fa-a520-047cc273227b-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.465030 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbg48\" (UniqueName: \"kubernetes.io/projected/1af05a7c-7c8a-42fa-a520-047cc273227b-kube-api-access-hbg48\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.473550 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"1af05a7c-7c8a-42fa-a520-047cc273227b\") " pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:07 crc kubenswrapper[4967]: I1121 15:54:07.558139 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:14 crc kubenswrapper[4967]: I1121 15:54:14.286539 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 15:54:14 crc kubenswrapper[4967]: E1121 15:54:14.795826 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 21 15:54:14 crc kubenswrapper[4967]: E1121 15:54:14.796280 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rfqg2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-kf527_openstack(1909ee8f-a585-43c9-809f-7cc80c5ec524): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 15:54:14 crc kubenswrapper[4967]: E1121 15:54:14.797379 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-kf527" podUID="1909ee8f-a585-43c9-809f-7cc80c5ec524" Nov 21 15:54:14 crc kubenswrapper[4967]: E1121 15:54:14.825377 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 21 15:54:14 crc kubenswrapper[4967]: E1121 15:54:14.825625 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f2q9d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(d96c12a3-6ce4-40f6-a655-0881d711f9fa): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 15:54:14 crc kubenswrapper[4967]: E1121 15:54:14.828958 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="d96c12a3-6ce4-40f6-a655-0881d711f9fa" Nov 21 15:54:14 crc kubenswrapper[4967]: E1121 15:54:14.838884 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 21 15:54:14 crc kubenswrapper[4967]: E1121 15:54:14.839073 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-96cs7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-wnnsx_openstack(3a29d511-33be-481c-93d5-2bacc9cabf22): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 15:54:14 crc kubenswrapper[4967]: E1121 15:54:14.840204 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-wnnsx" podUID="3a29d511-33be-481c-93d5-2bacc9cabf22" Nov 21 15:54:15 crc kubenswrapper[4967]: E1121 15:54:15.462022 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="d96c12a3-6ce4-40f6-a655-0881d711f9fa" Nov 21 15:54:16 crc kubenswrapper[4967]: W1121 15:54:16.481737 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd44bd64f_3c97_4cd8_be5e_2cabe45480a0.slice/crio-3bf39713487c2db7e8d15561184c200b35e5192035d5c33ed72a2df5a1cb939b WatchSource:0}: Error finding container 3bf39713487c2db7e8d15561184c200b35e5192035d5c33ed72a2df5a1cb939b: Status 404 returned error can't find the container with id 3bf39713487c2db7e8d15561184c200b35e5192035d5c33ed72a2df5a1cb939b Nov 21 15:54:16 crc kubenswrapper[4967]: I1121 15:54:16.522074 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:54:16 crc 
kubenswrapper[4967]: I1121 15:54:16.522138 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:54:16 crc kubenswrapper[4967]: I1121 15:54:16.719622 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-wnnsx" Nov 21 15:54:16 crc kubenswrapper[4967]: I1121 15:54:16.720055 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-kf527" Nov 21 15:54:16 crc kubenswrapper[4967]: I1121 15:54:16.872277 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rfqg2\" (UniqueName: \"kubernetes.io/projected/1909ee8f-a585-43c9-809f-7cc80c5ec524-kube-api-access-rfqg2\") pod \"1909ee8f-a585-43c9-809f-7cc80c5ec524\" (UID: \"1909ee8f-a585-43c9-809f-7cc80c5ec524\") " Nov 21 15:54:16 crc kubenswrapper[4967]: I1121 15:54:16.872689 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-96cs7\" (UniqueName: \"kubernetes.io/projected/3a29d511-33be-481c-93d5-2bacc9cabf22-kube-api-access-96cs7\") pod \"3a29d511-33be-481c-93d5-2bacc9cabf22\" (UID: \"3a29d511-33be-481c-93d5-2bacc9cabf22\") " Nov 21 15:54:16 crc kubenswrapper[4967]: I1121 15:54:16.872736 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a29d511-33be-481c-93d5-2bacc9cabf22-dns-svc\") pod \"3a29d511-33be-481c-93d5-2bacc9cabf22\" (UID: \"3a29d511-33be-481c-93d5-2bacc9cabf22\") " Nov 21 15:54:16 crc kubenswrapper[4967]: I1121 15:54:16.872896 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a29d511-33be-481c-93d5-2bacc9cabf22-config\") pod \"3a29d511-33be-481c-93d5-2bacc9cabf22\" (UID: \"3a29d511-33be-481c-93d5-2bacc9cabf22\") " Nov 21 15:54:16 crc kubenswrapper[4967]: I1121 15:54:16.872943 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1909ee8f-a585-43c9-809f-7cc80c5ec524-config\") pod \"1909ee8f-a585-43c9-809f-7cc80c5ec524\" (UID: \"1909ee8f-a585-43c9-809f-7cc80c5ec524\") " Nov 21 15:54:16 crc kubenswrapper[4967]: I1121 15:54:16.874058 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1909ee8f-a585-43c9-809f-7cc80c5ec524-config" (OuterVolumeSpecName: "config") pod "1909ee8f-a585-43c9-809f-7cc80c5ec524" (UID: "1909ee8f-a585-43c9-809f-7cc80c5ec524"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:54:16 crc kubenswrapper[4967]: I1121 15:54:16.876578 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a29d511-33be-481c-93d5-2bacc9cabf22-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3a29d511-33be-481c-93d5-2bacc9cabf22" (UID: "3a29d511-33be-481c-93d5-2bacc9cabf22"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:54:16 crc kubenswrapper[4967]: I1121 15:54:16.877457 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a29d511-33be-481c-93d5-2bacc9cabf22-config" (OuterVolumeSpecName: "config") pod "3a29d511-33be-481c-93d5-2bacc9cabf22" (UID: "3a29d511-33be-481c-93d5-2bacc9cabf22"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:54:16 crc kubenswrapper[4967]: I1121 15:54:16.882731 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1909ee8f-a585-43c9-809f-7cc80c5ec524-kube-api-access-rfqg2" (OuterVolumeSpecName: "kube-api-access-rfqg2") pod "1909ee8f-a585-43c9-809f-7cc80c5ec524" (UID: "1909ee8f-a585-43c9-809f-7cc80c5ec524"). InnerVolumeSpecName "kube-api-access-rfqg2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:54:16 crc kubenswrapper[4967]: I1121 15:54:16.883717 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a29d511-33be-481c-93d5-2bacc9cabf22-kube-api-access-96cs7" (OuterVolumeSpecName: "kube-api-access-96cs7") pod "3a29d511-33be-481c-93d5-2bacc9cabf22" (UID: "3a29d511-33be-481c-93d5-2bacc9cabf22"). InnerVolumeSpecName "kube-api-access-96cs7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:54:16 crc kubenswrapper[4967]: I1121 15:54:16.975585 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-96cs7\" (UniqueName: \"kubernetes.io/projected/3a29d511-33be-481c-93d5-2bacc9cabf22-kube-api-access-96cs7\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:16 crc kubenswrapper[4967]: I1121 15:54:16.975623 4967 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a29d511-33be-481c-93d5-2bacc9cabf22-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:16 crc kubenswrapper[4967]: I1121 15:54:16.975635 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a29d511-33be-481c-93d5-2bacc9cabf22-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:16 crc kubenswrapper[4967]: I1121 15:54:16.975644 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1909ee8f-a585-43c9-809f-7cc80c5ec524-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:16 crc kubenswrapper[4967]: I1121 15:54:16.975653 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rfqg2\" (UniqueName: \"kubernetes.io/projected/1909ee8f-a585-43c9-809f-7cc80c5ec524-kube-api-access-rfqg2\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.079290 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 21 15:54:17 crc kubenswrapper[4967]: W1121 15:54:17.128122 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2af9b421_461a_4411_8a7d_9a0bf5fa8d28.slice/crio-422e5ed78e502e7dc3b8469314375fd7252b17c6ff8921712750decd3cde189d WatchSource:0}: Error finding container 422e5ed78e502e7dc3b8469314375fd7252b17c6ff8921712750decd3cde189d: Status 404 returned error can't find the container with id 422e5ed78e502e7dc3b8469314375fd7252b17c6ff8921712750decd3cde189d Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.203516 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/ovn-controller-5c2pr"] Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.490975 4967 generic.go:334] "Generic (PLEG): container finished" podID="946f196e-9c08-497e-9701-7da21de6b10b" containerID="4c97d011610ef1cc51f6e90b29782a88eb142afdf04bf719202a6585946e08a1" exitCode=0 Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.491043 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-hwdts" event={"ID":"946f196e-9c08-497e-9701-7da21de6b10b","Type":"ContainerDied","Data":"4c97d011610ef1cc51f6e90b29782a88eb142afdf04bf719202a6585946e08a1"} Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.494496 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"d44bd64f-3c97-4cd8-be5e-2cabe45480a0","Type":"ContainerStarted","Data":"3bf39713487c2db7e8d15561184c200b35e5192035d5c33ed72a2df5a1cb939b"} Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.498573 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"2af9b421-461a-4411-8a7d-9a0bf5fa8d28","Type":"ContainerStarted","Data":"422e5ed78e502e7dc3b8469314375fd7252b17c6ff8921712750decd3cde189d"} Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.500037 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1d7a085e-ccb2-4791-9bdb-e3c564e8b450","Type":"ContainerStarted","Data":"6a656f15d5c6fca745d74602f23ae43e148631e1110cad399ede314a5cf45684"} Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.502587 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5c2pr" event={"ID":"e04788f9-f223-46ef-b96b-24e05c5d911f","Type":"ContainerStarted","Data":"7fc14ba3d2efa271047243f9b7b9f62bf17b44de01e73c564c9c13ee0d0ed3c3"} Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.503768 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-kf527" event={"ID":"1909ee8f-a585-43c9-809f-7cc80c5ec524","Type":"ContainerDied","Data":"654e526e081c24533885e6648db12fed377d0e23916a464a42b8b80a40026322"} Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.503809 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-kf527" Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.506590 4967 generic.go:334] "Generic (PLEG): container finished" podID="95ecc930-ddf3-4ced-a9e8-ade44ada5666" containerID="7ab9162cc460102b7080d9b9cd7d9a088b4e65acfbb7b31e014d83b8e608dd21" exitCode=0 Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.506857 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" event={"ID":"95ecc930-ddf3-4ced-a9e8-ade44ada5666","Type":"ContainerDied","Data":"7ab9162cc460102b7080d9b9cd7d9a088b4e65acfbb7b31e014d83b8e608dd21"} Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.507702 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-wnnsx" event={"ID":"3a29d511-33be-481c-93d5-2bacc9cabf22","Type":"ContainerDied","Data":"87e5c871da2afb15e0ac82813e41266a8f9ca336d441eb65d720bf170183065e"} Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.507765 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-wnnsx" Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.681821 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.691706 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-9dd55876b-jvsbc"] Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.698577 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 21 15:54:17 crc kubenswrapper[4967]: W1121 15:54:17.716497 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1af05a7c_7c8a_42fa_a520_047cc273227b.slice/crio-b48af53de770886e90324dca3db5abff9b19e6d27c45adeadfccdbfbbe35ca98 WatchSource:0}: Error finding container b48af53de770886e90324dca3db5abff9b19e6d27c45adeadfccdbfbbe35ca98: Status 404 returned error can't find the container with id b48af53de770886e90324dca3db5abff9b19e6d27c45adeadfccdbfbbe35ca98 Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.723334 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.831527 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-b7zgw"] Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.839512 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 15:54:17 crc kubenswrapper[4967]: W1121 15:54:17.845241 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod52658e97_d6ec_4a6d_ac6b_a5168a5ab42f.slice/crio-f29eeda15d8ce06646ef08c9c2f0c59a70413d50ca762dc9e72b3fef36ba234b WatchSource:0}: Error finding container f29eeda15d8ce06646ef08c9c2f0c59a70413d50ca762dc9e72b3fef36ba234b: Status 404 returned error can't find the container with id f29eeda15d8ce06646ef08c9c2f0c59a70413d50ca762dc9e72b3fef36ba234b Nov 21 15:54:17 crc kubenswrapper[4967]: W1121 15:54:17.848828 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55a13dc8_8cec_4642_9c0b_3c6799d942fc.slice/crio-64a4db117ccc7b580b900bf10008b8eff08a01247ff42adbdf4d87bd04a06806 WatchSource:0}: Error finding container 64a4db117ccc7b580b900bf10008b8eff08a01247ff42adbdf4d87bd04a06806: Status 404 returned error can't find the container with id 64a4db117ccc7b580b900bf10008b8eff08a01247ff42adbdf4d87bd04a06806 Nov 21 15:54:17 crc kubenswrapper[4967]: I1121 15:54:17.988672 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-n8r27"] Nov 21 15:54:17 crc kubenswrapper[4967]: W1121 15:54:17.994494 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83956a70_e80b_424b_9396_8febf34b60ed.slice/crio-7e2c7f50c7f02741120b79dcbc5dc8f9b7b8b7e5404bf6af1effe85d90abf3f0 WatchSource:0}: Error finding container 7e2c7f50c7f02741120b79dcbc5dc8f9b7b8b7e5404bf6af1effe85d90abf3f0: Status 404 returned error can't find the container with id 7e2c7f50c7f02741120b79dcbc5dc8f9b7b8b7e5404bf6af1effe85d90abf3f0 Nov 21 15:54:18 crc kubenswrapper[4967]: I1121 15:54:18.183337 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-wnnsx"] Nov 21 15:54:18 crc 
kubenswrapper[4967]: I1121 15:54:18.194296 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-wnnsx"] Nov 21 15:54:18 crc kubenswrapper[4967]: I1121 15:54:18.211058 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-kf527"] Nov 21 15:54:18 crc kubenswrapper[4967]: I1121 15:54:18.218352 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-kf527"] Nov 21 15:54:18 crc kubenswrapper[4967]: E1121 15:54:18.407424 4967 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Nov 21 15:54:18 crc kubenswrapper[4967]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/946f196e-9c08-497e-9701-7da21de6b10b/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 21 15:54:18 crc kubenswrapper[4967]: > podSandboxID="86f7dbc008dfc227dd239a2363578fa11c04e4efe4e4e8d342f25f3236bd930f" Nov 21 15:54:18 crc kubenswrapper[4967]: E1121 15:54:18.408192 4967 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 21 15:54:18 crc kubenswrapper[4967]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7f4b2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 
},Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-hwdts_openstack(946f196e-9c08-497e-9701-7da21de6b10b): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/946f196e-9c08-497e-9701-7da21de6b10b/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 21 15:54:18 crc kubenswrapper[4967]: > logger="UnhandledError" Nov 21 15:54:18 crc kubenswrapper[4967]: E1121 15:54:18.409443 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/946f196e-9c08-497e-9701-7da21de6b10b/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-666b6646f7-hwdts" podUID="946f196e-9c08-497e-9701-7da21de6b10b" Nov 21 15:54:18 crc kubenswrapper[4967]: I1121 15:54:18.525613 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"1a455b5d-516b-4e43-a717-f7aa6e326ee8","Type":"ContainerStarted","Data":"a750152ee1f045650b939c339bc2d5965490eeb8a6e4045612999da96dba7fad"} Nov 21 15:54:18 crc kubenswrapper[4967]: I1121 15:54:18.530000 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-9dd55876b-jvsbc" event={"ID":"6a507087-6dd3-47ba-863f-17f2b8e9503a","Type":"ContainerStarted","Data":"2f754952fc7547f5068c6f227443e760b7e72418b9777581a3954eb5dc0a5add"} Nov 21 15:54:18 crc kubenswrapper[4967]: I1121 15:54:18.530045 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-9dd55876b-jvsbc" event={"ID":"6a507087-6dd3-47ba-863f-17f2b8e9503a","Type":"ContainerStarted","Data":"d113978e45d54bb15093d217f1cc57ad1964bebf27af5c4d35c24355a5e18fff"} Nov 21 15:54:18 crc kubenswrapper[4967]: I1121 15:54:18.533411 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"b0cdc464-b000-47e5-a8d8-0a881ba447c1","Type":"ContainerStarted","Data":"3245b530d94da46fc954d930e3669e8108bcbddecaca73b91108680ded613e45"} Nov 21 15:54:18 crc kubenswrapper[4967]: I1121 15:54:18.535110 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-n8r27" event={"ID":"83956a70-e80b-424b-9396-8febf34b60ed","Type":"ContainerStarted","Data":"7e2c7f50c7f02741120b79dcbc5dc8f9b7b8b7e5404bf6af1effe85d90abf3f0"} Nov 21 15:54:18 crc kubenswrapper[4967]: I1121 15:54:18.537002 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-b7zgw" 
event={"ID":"55a13dc8-8cec-4642-9c0b-3c6799d942fc","Type":"ContainerStarted","Data":"64a4db117ccc7b580b900bf10008b8eff08a01247ff42adbdf4d87bd04a06806"} Nov 21 15:54:18 crc kubenswrapper[4967]: I1121 15:54:18.540175 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"52658e97-d6ec-4a6d-ac6b-a5168a5ab42f","Type":"ContainerStarted","Data":"f29eeda15d8ce06646ef08c9c2f0c59a70413d50ca762dc9e72b3fef36ba234b"} Nov 21 15:54:18 crc kubenswrapper[4967]: I1121 15:54:18.542346 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"b3d39ab9-f219-4af5-b82c-102fefaff9bc","Type":"ContainerStarted","Data":"64fb32bf700b9c4f7bf7426cfd12efc3ac1d5f0db6d66c60fb5a87879f4e1893"} Nov 21 15:54:18 crc kubenswrapper[4967]: I1121 15:54:18.542373 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"b3d39ab9-f219-4af5-b82c-102fefaff9bc","Type":"ContainerStarted","Data":"6e3e2e8c13663173f7efff3d446f679b979079ed5e004ea0c21f62ce69ada1c5"} Nov 21 15:54:18 crc kubenswrapper[4967]: I1121 15:54:18.568806 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1909ee8f-a585-43c9-809f-7cc80c5ec524" path="/var/lib/kubelet/pods/1909ee8f-a585-43c9-809f-7cc80c5ec524/volumes" Nov 21 15:54:18 crc kubenswrapper[4967]: I1121 15:54:18.569897 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a29d511-33be-481c-93d5-2bacc9cabf22" path="/var/lib/kubelet/pods/3a29d511-33be-481c-93d5-2bacc9cabf22/volumes" Nov 21 15:54:18 crc kubenswrapper[4967]: I1121 15:54:18.570301 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1af05a7c-7c8a-42fa-a520-047cc273227b","Type":"ContainerStarted","Data":"b48af53de770886e90324dca3db5abff9b19e6d27c45adeadfccdbfbbe35ca98"} Nov 21 15:54:18 crc kubenswrapper[4967]: I1121 15:54:18.642728 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-9dd55876b-jvsbc" podStartSLOduration=17.642708835 podStartE2EDuration="17.642708835s" podCreationTimestamp="2025-11-21 15:54:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:54:18.627631273 +0000 UTC m=+1146.886152281" watchObservedRunningTime="2025-11-21 15:54:18.642708835 +0000 UTC m=+1146.901229843" Nov 21 15:54:19 crc kubenswrapper[4967]: I1121 15:54:19.556682 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-hwdts" event={"ID":"946f196e-9c08-497e-9701-7da21de6b10b","Type":"ContainerStarted","Data":"0cb247674d5bd95b215300f22a2cf6e9f47f332404aedbc7a305e1a9a007c2cf"} Nov 21 15:54:19 crc kubenswrapper[4967]: I1121 15:54:19.557417 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-hwdts" Nov 21 15:54:19 crc kubenswrapper[4967]: I1121 15:54:19.559956 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" event={"ID":"95ecc930-ddf3-4ced-a9e8-ade44ada5666","Type":"ContainerStarted","Data":"3fa60fa7efe1ca9064213b74f17e7bb192b2769b9f9824137c1a9a217fad5054"} Nov 21 15:54:19 crc kubenswrapper[4967]: I1121 15:54:19.581759 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-hwdts" podStartSLOduration=4.44391966 podStartE2EDuration="26.581737221s" podCreationTimestamp="2025-11-21 
15:53:53 +0000 UTC" firstStartedPulling="2025-11-21 15:53:54.719596394 +0000 UTC m=+1122.978117402" lastFinishedPulling="2025-11-21 15:54:16.857413955 +0000 UTC m=+1145.115934963" observedRunningTime="2025-11-21 15:54:19.577824719 +0000 UTC m=+1147.836345727" watchObservedRunningTime="2025-11-21 15:54:19.581737221 +0000 UTC m=+1147.840258229" Nov 21 15:54:19 crc kubenswrapper[4967]: I1121 15:54:19.604139 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" podStartSLOduration=4.941194374 podStartE2EDuration="26.604114702s" podCreationTimestamp="2025-11-21 15:53:53 +0000 UTC" firstStartedPulling="2025-11-21 15:53:55.19566543 +0000 UTC m=+1123.454186428" lastFinishedPulling="2025-11-21 15:54:16.858585738 +0000 UTC m=+1145.117106756" observedRunningTime="2025-11-21 15:54:19.600065916 +0000 UTC m=+1147.858586934" watchObservedRunningTime="2025-11-21 15:54:19.604114702 +0000 UTC m=+1147.862635710" Nov 21 15:54:20 crc kubenswrapper[4967]: I1121 15:54:20.572690 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" Nov 21 15:54:21 crc kubenswrapper[4967]: I1121 15:54:21.582806 4967 generic.go:334] "Generic (PLEG): container finished" podID="1d7a085e-ccb2-4791-9bdb-e3c564e8b450" containerID="6a656f15d5c6fca745d74602f23ae43e148631e1110cad399ede314a5cf45684" exitCode=0 Nov 21 15:54:21 crc kubenswrapper[4967]: I1121 15:54:21.583778 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1d7a085e-ccb2-4791-9bdb-e3c564e8b450","Type":"ContainerDied","Data":"6a656f15d5c6fca745d74602f23ae43e148631e1110cad399ede314a5cf45684"} Nov 21 15:54:21 crc kubenswrapper[4967]: I1121 15:54:21.968869 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:21 crc kubenswrapper[4967]: I1121 15:54:21.968928 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:21 crc kubenswrapper[4967]: I1121 15:54:21.973332 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:22 crc kubenswrapper[4967]: I1121 15:54:22.598935 4967 generic.go:334] "Generic (PLEG): container finished" podID="b3d39ab9-f219-4af5-b82c-102fefaff9bc" containerID="64fb32bf700b9c4f7bf7426cfd12efc3ac1d5f0db6d66c60fb5a87879f4e1893" exitCode=0 Nov 21 15:54:22 crc kubenswrapper[4967]: I1121 15:54:22.598994 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"b3d39ab9-f219-4af5-b82c-102fefaff9bc","Type":"ContainerDied","Data":"64fb32bf700b9c4f7bf7426cfd12efc3ac1d5f0db6d66c60fb5a87879f4e1893"} Nov 21 15:54:22 crc kubenswrapper[4967]: I1121 15:54:22.604895 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-9dd55876b-jvsbc" Nov 21 15:54:22 crc kubenswrapper[4967]: I1121 15:54:22.773289 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-7f56c8cd-xplcm"] Nov 21 15:54:24 crc kubenswrapper[4967]: I1121 15:54:24.178538 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" Nov 21 15:54:24 crc kubenswrapper[4967]: I1121 15:54:24.243291 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-hwdts"] Nov 21 15:54:24 crc 
kubenswrapper[4967]: I1121 15:54:24.243548 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-hwdts" podUID="946f196e-9c08-497e-9701-7da21de6b10b" containerName="dnsmasq-dns" containerID="cri-o://0cb247674d5bd95b215300f22a2cf6e9f47f332404aedbc7a305e1a9a007c2cf" gracePeriod=10 Nov 21 15:54:24 crc kubenswrapper[4967]: I1121 15:54:24.248962 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-666b6646f7-hwdts" Nov 21 15:54:24 crc kubenswrapper[4967]: E1121 15:54:24.548439 4967 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod946f196e_9c08_497e_9701_7da21de6b10b.slice/crio-conmon-0cb247674d5bd95b215300f22a2cf6e9f47f332404aedbc7a305e1a9a007c2cf.scope\": RecentStats: unable to find data in memory cache]" Nov 21 15:54:24 crc kubenswrapper[4967]: I1121 15:54:24.650769 4967 generic.go:334] "Generic (PLEG): container finished" podID="946f196e-9c08-497e-9701-7da21de6b10b" containerID="0cb247674d5bd95b215300f22a2cf6e9f47f332404aedbc7a305e1a9a007c2cf" exitCode=0 Nov 21 15:54:24 crc kubenswrapper[4967]: I1121 15:54:24.650810 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-hwdts" event={"ID":"946f196e-9c08-497e-9701-7da21de6b10b","Type":"ContainerDied","Data":"0cb247674d5bd95b215300f22a2cf6e9f47f332404aedbc7a305e1a9a007c2cf"} Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.027703 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-hwdts" Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.047058 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7f4b2\" (UniqueName: \"kubernetes.io/projected/946f196e-9c08-497e-9701-7da21de6b10b-kube-api-access-7f4b2\") pod \"946f196e-9c08-497e-9701-7da21de6b10b\" (UID: \"946f196e-9c08-497e-9701-7da21de6b10b\") " Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.047260 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/946f196e-9c08-497e-9701-7da21de6b10b-config\") pod \"946f196e-9c08-497e-9701-7da21de6b10b\" (UID: \"946f196e-9c08-497e-9701-7da21de6b10b\") " Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.047294 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/946f196e-9c08-497e-9701-7da21de6b10b-dns-svc\") pod \"946f196e-9c08-497e-9701-7da21de6b10b\" (UID: \"946f196e-9c08-497e-9701-7da21de6b10b\") " Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.109674 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/946f196e-9c08-497e-9701-7da21de6b10b-kube-api-access-7f4b2" (OuterVolumeSpecName: "kube-api-access-7f4b2") pod "946f196e-9c08-497e-9701-7da21de6b10b" (UID: "946f196e-9c08-497e-9701-7da21de6b10b"). InnerVolumeSpecName "kube-api-access-7f4b2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.150947 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7f4b2\" (UniqueName: \"kubernetes.io/projected/946f196e-9c08-497e-9701-7da21de6b10b-kube-api-access-7f4b2\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.528948 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/946f196e-9c08-497e-9701-7da21de6b10b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "946f196e-9c08-497e-9701-7da21de6b10b" (UID: "946f196e-9c08-497e-9701-7da21de6b10b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.535898 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/946f196e-9c08-497e-9701-7da21de6b10b-config" (OuterVolumeSpecName: "config") pod "946f196e-9c08-497e-9701-7da21de6b10b" (UID: "946f196e-9c08-497e-9701-7da21de6b10b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.560260 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/946f196e-9c08-497e-9701-7da21de6b10b-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.560296 4967 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/946f196e-9c08-497e-9701-7da21de6b10b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.660081 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-hwdts" event={"ID":"946f196e-9c08-497e-9701-7da21de6b10b","Type":"ContainerDied","Data":"86f7dbc008dfc227dd239a2363578fa11c04e4efe4e4e8d342f25f3236bd930f"} Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.660118 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-hwdts" Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.660139 4967 scope.go:117] "RemoveContainer" containerID="0cb247674d5bd95b215300f22a2cf6e9f47f332404aedbc7a305e1a9a007c2cf" Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.662093 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1af05a7c-7c8a-42fa-a520-047cc273227b","Type":"ContainerStarted","Data":"fa1629d1239ef20f4c73980328e9d4e3bafd03d91e713401c6ea5ba225c608ba"} Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.663786 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"2af9b421-461a-4411-8a7d-9a0bf5fa8d28","Type":"ContainerStarted","Data":"f022196b24a26bf1dd81def255aa71065a80a0cffebb7f4dbe1d52360a8f083e"} Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.663913 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.665155 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5c2pr" event={"ID":"e04788f9-f223-46ef-b96b-24e05c5d911f","Type":"ContainerStarted","Data":"9ce0b44ad8cd5201f369f062274b05dd781474ff746a7b460e16f9d134a6b9d2"} Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.665247 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-5c2pr" Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.666785 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"52658e97-d6ec-4a6d-ac6b-a5168a5ab42f","Type":"ContainerStarted","Data":"f4a06cec5ab2b2de6390165c050102c03d3b18d9446865f54b6d8a53ab506e14"} Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.666870 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.669823 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"b3d39ab9-f219-4af5-b82c-102fefaff9bc","Type":"ContainerStarted","Data":"2fd306f407b1b0faeea2f604331782f49a9ae5e5fd8cadafb355c42984171cce"} Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.673148 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1d7a085e-ccb2-4791-9bdb-e3c564e8b450","Type":"ContainerStarted","Data":"deb389227b43ac3e178ffff73e50391e84d063b4782a64d11a8b3ab2ddc65f51"} Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.675780 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"b0cdc464-b000-47e5-a8d8-0a881ba447c1","Type":"ContainerStarted","Data":"f97965dc0abf9253fdd10747b56f4dcb5da0a52ec94ea4729dcfe883020f1c03"} Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.676766 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-n8r27" event={"ID":"83956a70-e80b-424b-9396-8febf34b60ed","Type":"ContainerStarted","Data":"a2d03a28b5ef1a0d47c5d7bcb9328db12a9e3ee52929adb23af1d7ef2e9098fd"} Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.683459 4967 scope.go:117] "RemoveContainer" containerID="4c97d011610ef1cc51f6e90b29782a88eb142afdf04bf719202a6585946e08a1" Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.683582 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-b7zgw" event={"ID":"55a13dc8-8cec-4642-9c0b-3c6799d942fc","Type":"ContainerStarted","Data":"08ab3af4792e260b3de38d29ff76cb4cb159110cf114f1b0bf6a346b38674baa"} Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.685352 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=22.902891236 podStartE2EDuration="28.685341116s" podCreationTimestamp="2025-11-21 15:53:57 +0000 UTC" firstStartedPulling="2025-11-21 15:54:17.132356655 +0000 UTC m=+1145.390877663" lastFinishedPulling="2025-11-21 15:54:22.914806535 +0000 UTC m=+1151.173327543" observedRunningTime="2025-11-21 15:54:25.682703761 +0000 UTC m=+1153.941224769" watchObservedRunningTime="2025-11-21 15:54:25.685341116 +0000 UTC m=+1153.943862124" Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.716603 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=10.806451834 podStartE2EDuration="30.716584772s" podCreationTimestamp="2025-11-21 15:53:55 +0000 UTC" firstStartedPulling="2025-11-21 15:53:56.932354099 +0000 UTC m=+1125.190875107" lastFinishedPulling="2025-11-21 15:54:16.842487037 +0000 UTC m=+1145.101008045" observedRunningTime="2025-11-21 15:54:25.71022595 +0000 UTC m=+1153.968746958" watchObservedRunningTime="2025-11-21 15:54:25.716584772 +0000 UTC m=+1153.975105780" Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.728875 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-hwdts"] Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.738073 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-hwdts"] Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.743990 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-5c2pr" podStartSLOduration=16.620482771 podStartE2EDuration="22.743970197s" podCreationTimestamp="2025-11-21 15:54:03 +0000 UTC" firstStartedPulling="2025-11-21 15:54:17.206329505 +0000 UTC m=+1145.464850513" lastFinishedPulling="2025-11-21 15:54:23.329816931 +0000 UTC m=+1151.588337939" observedRunningTime="2025-11-21 15:54:25.740050575 +0000 UTC m=+1153.998571603" watchObservedRunningTime="2025-11-21 15:54:25.743970197 +0000 UTC m=+1154.002491205" Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.761904 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=29.76188908 podStartE2EDuration="29.76188908s" podCreationTimestamp="2025-11-21 15:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:54:25.759684267 +0000 UTC m=+1154.018205275" watchObservedRunningTime="2025-11-21 15:54:25.76188908 +0000 UTC m=+1154.020410088" Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.805145 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=19.495633653 podStartE2EDuration="25.80512322s" podCreationTimestamp="2025-11-21 15:54:00 +0000 UTC" firstStartedPulling="2025-11-21 15:54:17.849607684 +0000 UTC m=+1146.108128692" lastFinishedPulling="2025-11-21 15:54:24.159097251 +0000 UTC m=+1152.417618259" observedRunningTime="2025-11-21 15:54:25.797086029 +0000 UTC m=+1154.055607047" watchObservedRunningTime="2025-11-21 15:54:25.80512322 
+0000 UTC m=+1154.063644228" Nov 21 15:54:25 crc kubenswrapper[4967]: I1121 15:54:25.816371 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-b7zgw" podStartSLOduration=19.565844948 podStartE2EDuration="24.816354592s" podCreationTimestamp="2025-11-21 15:54:01 +0000 UTC" firstStartedPulling="2025-11-21 15:54:17.852599069 +0000 UTC m=+1146.111120077" lastFinishedPulling="2025-11-21 15:54:23.103108713 +0000 UTC m=+1151.361629721" observedRunningTime="2025-11-21 15:54:25.812060949 +0000 UTC m=+1154.070581957" watchObservedRunningTime="2025-11-21 15:54:25.816354592 +0000 UTC m=+1154.074875600" Nov 21 15:54:26 crc kubenswrapper[4967]: I1121 15:54:26.479201 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 21 15:54:26 crc kubenswrapper[4967]: I1121 15:54:26.479252 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 21 15:54:26 crc kubenswrapper[4967]: I1121 15:54:26.559461 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="946f196e-9c08-497e-9701-7da21de6b10b" path="/var/lib/kubelet/pods/946f196e-9c08-497e-9701-7da21de6b10b/volumes" Nov 21 15:54:26 crc kubenswrapper[4967]: I1121 15:54:26.693923 4967 generic.go:334] "Generic (PLEG): container finished" podID="83956a70-e80b-424b-9396-8febf34b60ed" containerID="a2d03a28b5ef1a0d47c5d7bcb9328db12a9e3ee52929adb23af1d7ef2e9098fd" exitCode=0 Nov 21 15:54:26 crc kubenswrapper[4967]: I1121 15:54:26.693985 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-n8r27" event={"ID":"83956a70-e80b-424b-9396-8febf34b60ed","Type":"ContainerDied","Data":"a2d03a28b5ef1a0d47c5d7bcb9328db12a9e3ee52929adb23af1d7ef2e9098fd"} Nov 21 15:54:27 crc kubenswrapper[4967]: I1121 15:54:27.711479 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-n8r27" event={"ID":"83956a70-e80b-424b-9396-8febf34b60ed","Type":"ContainerStarted","Data":"3a8b9ecd288833602558926cb901851fd945f6fc90980ecc61339dabee68c151"} Nov 21 15:54:27 crc kubenswrapper[4967]: I1121 15:54:27.712834 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"d44bd64f-3c97-4cd8-be5e-2cabe45480a0","Type":"ContainerStarted","Data":"4d1c38784eb7fab979868a378c3864fcbd98b4b458a3d037be3227c959d81d16"} Nov 21 15:54:27 crc kubenswrapper[4967]: I1121 15:54:27.878747 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 21 15:54:27 crc kubenswrapper[4967]: I1121 15:54:27.878881 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 21 15:54:28 crc kubenswrapper[4967]: I1121 15:54:28.724689 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-n8r27" event={"ID":"83956a70-e80b-424b-9396-8febf34b60ed","Type":"ContainerStarted","Data":"5b426458a5167ea8145232a419256db441a556108c654e392302d65c2ca46002"} Nov 21 15:54:28 crc kubenswrapper[4967]: I1121 15:54:28.745151 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-n8r27" podStartSLOduration=20.639654005 podStartE2EDuration="25.745134731s" podCreationTimestamp="2025-11-21 15:54:03 +0000 UTC" firstStartedPulling="2025-11-21 15:54:17.997333008 +0000 UTC m=+1146.255854016" lastFinishedPulling="2025-11-21 
15:54:23.102813734 +0000 UTC m=+1151.361334742" observedRunningTime="2025-11-21 15:54:28.741729764 +0000 UTC m=+1157.000250782" watchObservedRunningTime="2025-11-21 15:54:28.745134731 +0000 UTC m=+1157.003655739" Nov 21 15:54:29 crc kubenswrapper[4967]: I1121 15:54:29.338870 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-n8r27" Nov 21 15:54:29 crc kubenswrapper[4967]: I1121 15:54:29.338921 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-n8r27" Nov 21 15:54:31 crc kubenswrapper[4967]: I1121 15:54:31.751670 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 21 15:54:32 crc kubenswrapper[4967]: I1121 15:54:32.983919 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 21 15:54:33 crc kubenswrapper[4967]: I1121 15:54:33.062370 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 21 15:54:33 crc kubenswrapper[4967]: I1121 15:54:33.278507 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 21 15:54:33 crc kubenswrapper[4967]: I1121 15:54:33.773343 4967 generic.go:334] "Generic (PLEG): container finished" podID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" containerID="4d1c38784eb7fab979868a378c3864fcbd98b4b458a3d037be3227c959d81d16" exitCode=0 Nov 21 15:54:33 crc kubenswrapper[4967]: I1121 15:54:33.773410 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"d44bd64f-3c97-4cd8-be5e-2cabe45480a0","Type":"ContainerDied","Data":"4d1c38784eb7fab979868a378c3864fcbd98b4b458a3d037be3227c959d81d16"} Nov 21 15:54:33 crc kubenswrapper[4967]: I1121 15:54:33.777192 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1af05a7c-7c8a-42fa-a520-047cc273227b","Type":"ContainerStarted","Data":"09cf7cb7ecea4359fc8bba218130de272835052cd0f5c0612431ae79959cf3a1"} Nov 21 15:54:33 crc kubenswrapper[4967]: I1121 15:54:33.783158 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"b0cdc464-b000-47e5-a8d8-0a881ba447c1","Type":"ContainerStarted","Data":"63b536799a6de6adc7fb5cadebfc28bffb5226e92f5d41ce644dd3e9ad6cf613"} Nov 21 15:54:33 crc kubenswrapper[4967]: I1121 15:54:33.832290 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=16.353481467 podStartE2EDuration="31.832265532s" podCreationTimestamp="2025-11-21 15:54:02 +0000 UTC" firstStartedPulling="2025-11-21 15:54:17.736050749 +0000 UTC m=+1145.994571767" lastFinishedPulling="2025-11-21 15:54:33.214834824 +0000 UTC m=+1161.473355832" observedRunningTime="2025-11-21 15:54:33.82242671 +0000 UTC m=+1162.080947718" watchObservedRunningTime="2025-11-21 15:54:33.832265532 +0000 UTC m=+1162.090786540" Nov 21 15:54:33 crc kubenswrapper[4967]: I1121 15:54:33.844266 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=12.353389964 podStartE2EDuration="27.844244445s" podCreationTimestamp="2025-11-21 15:54:06 +0000 UTC" firstStartedPulling="2025-11-21 15:54:17.734122864 +0000 UTC m=+1145.992643872" lastFinishedPulling="2025-11-21 15:54:33.224977345 +0000 UTC m=+1161.483498353" observedRunningTime="2025-11-21 15:54:33.837215974 +0000 UTC 
m=+1162.095736992" watchObservedRunningTime="2025-11-21 15:54:33.844244445 +0000 UTC m=+1162.102765463" Nov 21 15:54:33 crc kubenswrapper[4967]: I1121 15:54:33.870087 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:33 crc kubenswrapper[4967]: I1121 15:54:33.870133 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:33 crc kubenswrapper[4967]: I1121 15:54:33.916943 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:34 crc kubenswrapper[4967]: I1121 15:54:34.559287 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:34 crc kubenswrapper[4967]: I1121 15:54:34.601586 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:34 crc kubenswrapper[4967]: I1121 15:54:34.790489 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:34 crc kubenswrapper[4967]: I1121 15:54:34.827618 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 21 15:54:34 crc kubenswrapper[4967]: I1121 15:54:34.828400 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.173584 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-bn5km"] Nov 21 15:54:35 crc kubenswrapper[4967]: E1121 15:54:35.175054 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="946f196e-9c08-497e-9701-7da21de6b10b" containerName="init" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.175169 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="946f196e-9c08-497e-9701-7da21de6b10b" containerName="init" Nov 21 15:54:35 crc kubenswrapper[4967]: E1121 15:54:35.175271 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="946f196e-9c08-497e-9701-7da21de6b10b" containerName="dnsmasq-dns" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.175353 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="946f196e-9c08-497e-9701-7da21de6b10b" containerName="dnsmasq-dns" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.175689 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="946f196e-9c08-497e-9701-7da21de6b10b" containerName="dnsmasq-dns" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.179363 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-bn5km" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.182127 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-bn5km"] Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.182545 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.264294 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/861103dc-e186-402b-82ae-d8ba926b1cd1-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-bn5km\" (UID: \"861103dc-e186-402b-82ae-d8ba926b1cd1\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bn5km" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.265133 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/861103dc-e186-402b-82ae-d8ba926b1cd1-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-bn5km\" (UID: \"861103dc-e186-402b-82ae-d8ba926b1cd1\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bn5km" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.265176 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/861103dc-e186-402b-82ae-d8ba926b1cd1-config\") pod \"dnsmasq-dns-5bf47b49b7-bn5km\" (UID: \"861103dc-e186-402b-82ae-d8ba926b1cd1\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bn5km" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.265211 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bznw7\" (UniqueName: \"kubernetes.io/projected/861103dc-e186-402b-82ae-d8ba926b1cd1-kube-api-access-bznw7\") pod \"dnsmasq-dns-5bf47b49b7-bn5km\" (UID: \"861103dc-e186-402b-82ae-d8ba926b1cd1\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bn5km" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.272989 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-kdj29"] Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.274284 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.276434 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.302807 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-kdj29"] Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.367969 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e7b435-56ef-4877-9fd1-cfd83b68209e-combined-ca-bundle\") pod \"ovn-controller-metrics-kdj29\" (UID: \"77e7b435-56ef-4877-9fd1-cfd83b68209e\") " pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.368073 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/77e7b435-56ef-4877-9fd1-cfd83b68209e-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-kdj29\" (UID: \"77e7b435-56ef-4877-9fd1-cfd83b68209e\") " pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.368117 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/77e7b435-56ef-4877-9fd1-cfd83b68209e-ovs-rundir\") pod \"ovn-controller-metrics-kdj29\" (UID: \"77e7b435-56ef-4877-9fd1-cfd83b68209e\") " pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.368156 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/77e7b435-56ef-4877-9fd1-cfd83b68209e-ovn-rundir\") pod \"ovn-controller-metrics-kdj29\" (UID: \"77e7b435-56ef-4877-9fd1-cfd83b68209e\") " pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.368183 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/861103dc-e186-402b-82ae-d8ba926b1cd1-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-bn5km\" (UID: \"861103dc-e186-402b-82ae-d8ba926b1cd1\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bn5km" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.368206 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/861103dc-e186-402b-82ae-d8ba926b1cd1-config\") pod \"dnsmasq-dns-5bf47b49b7-bn5km\" (UID: \"861103dc-e186-402b-82ae-d8ba926b1cd1\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bn5km" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.368227 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bznw7\" (UniqueName: \"kubernetes.io/projected/861103dc-e186-402b-82ae-d8ba926b1cd1-kube-api-access-bznw7\") pod \"dnsmasq-dns-5bf47b49b7-bn5km\" (UID: \"861103dc-e186-402b-82ae-d8ba926b1cd1\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bn5km" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.368337 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/861103dc-e186-402b-82ae-d8ba926b1cd1-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-bn5km\" (UID: \"861103dc-e186-402b-82ae-d8ba926b1cd1\") " 
pod="openstack/dnsmasq-dns-5bf47b49b7-bn5km" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.368462 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/77e7b435-56ef-4877-9fd1-cfd83b68209e-config\") pod \"ovn-controller-metrics-kdj29\" (UID: \"77e7b435-56ef-4877-9fd1-cfd83b68209e\") " pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.368514 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rgph\" (UniqueName: \"kubernetes.io/projected/77e7b435-56ef-4877-9fd1-cfd83b68209e-kube-api-access-5rgph\") pod \"ovn-controller-metrics-kdj29\" (UID: \"77e7b435-56ef-4877-9fd1-cfd83b68209e\") " pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.369246 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/861103dc-e186-402b-82ae-d8ba926b1cd1-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-bn5km\" (UID: \"861103dc-e186-402b-82ae-d8ba926b1cd1\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bn5km" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.369634 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/861103dc-e186-402b-82ae-d8ba926b1cd1-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-bn5km\" (UID: \"861103dc-e186-402b-82ae-d8ba926b1cd1\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bn5km" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.369784 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/861103dc-e186-402b-82ae-d8ba926b1cd1-config\") pod \"dnsmasq-dns-5bf47b49b7-bn5km\" (UID: \"861103dc-e186-402b-82ae-d8ba926b1cd1\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bn5km" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.397708 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-bn5km"] Nov 21 15:54:35 crc kubenswrapper[4967]: E1121 15:54:35.398654 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-bznw7], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-5bf47b49b7-bn5km" podUID="861103dc-e186-402b-82ae-d8ba926b1cd1" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.403333 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bznw7\" (UniqueName: \"kubernetes.io/projected/861103dc-e186-402b-82ae-d8ba926b1cd1-kube-api-access-bznw7\") pod \"dnsmasq-dns-5bf47b49b7-bn5km\" (UID: \"861103dc-e186-402b-82ae-d8ba926b1cd1\") " pod="openstack/dnsmasq-dns-5bf47b49b7-bn5km" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.436710 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.439234 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.443353 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-lltpj" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.443436 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.443567 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.443605 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.455281 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-b5ck8"] Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.457083 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-b5ck8" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.475534 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/77e7b435-56ef-4877-9fd1-cfd83b68209e-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-kdj29\" (UID: \"77e7b435-56ef-4877-9fd1-cfd83b68209e\") " pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.475630 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/77e7b435-56ef-4877-9fd1-cfd83b68209e-ovs-rundir\") pod \"ovn-controller-metrics-kdj29\" (UID: \"77e7b435-56ef-4877-9fd1-cfd83b68209e\") " pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.475677 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d17eb49-4204-4589-82ac-c147f1b7b456-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.475711 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/77e7b435-56ef-4877-9fd1-cfd83b68209e-ovn-rundir\") pod \"ovn-controller-metrics-kdj29\" (UID: \"77e7b435-56ef-4877-9fd1-cfd83b68209e\") " pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.475757 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7d17eb49-4204-4589-82ac-c147f1b7b456-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.475835 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/77e7b435-56ef-4877-9fd1-cfd83b68209e-config\") pod \"ovn-controller-metrics-kdj29\" (UID: \"77e7b435-56ef-4877-9fd1-cfd83b68209e\") " pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.475870 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rgph\" (UniqueName: 
\"kubernetes.io/projected/77e7b435-56ef-4877-9fd1-cfd83b68209e-kube-api-access-5rgph\") pod \"ovn-controller-metrics-kdj29\" (UID: \"77e7b435-56ef-4877-9fd1-cfd83b68209e\") " pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.476807 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/77e7b435-56ef-4877-9fd1-cfd83b68209e-ovs-rundir\") pod \"ovn-controller-metrics-kdj29\" (UID: \"77e7b435-56ef-4877-9fd1-cfd83b68209e\") " pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.476935 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/77e7b435-56ef-4877-9fd1-cfd83b68209e-ovn-rundir\") pod \"ovn-controller-metrics-kdj29\" (UID: \"77e7b435-56ef-4877-9fd1-cfd83b68209e\") " pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.477686 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/77e7b435-56ef-4877-9fd1-cfd83b68209e-config\") pod \"ovn-controller-metrics-kdj29\" (UID: \"77e7b435-56ef-4877-9fd1-cfd83b68209e\") " pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.478074 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7d17eb49-4204-4589-82ac-c147f1b7b456-scripts\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.478372 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.479286 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.480780 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkxg8\" (UniqueName: \"kubernetes.io/projected/7d17eb49-4204-4589-82ac-c147f1b7b456-kube-api-access-vkxg8\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.480876 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d17eb49-4204-4589-82ac-c147f1b7b456-config\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.480932 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e7b435-56ef-4877-9fd1-cfd83b68209e-combined-ca-bundle\") pod \"ovn-controller-metrics-kdj29\" (UID: \"77e7b435-56ef-4877-9fd1-cfd83b68209e\") " pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.481002 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d17eb49-4204-4589-82ac-c147f1b7b456-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc 
kubenswrapper[4967]: I1121 15:54:35.481182 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d17eb49-4204-4589-82ac-c147f1b7b456-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.486130 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e7b435-56ef-4877-9fd1-cfd83b68209e-combined-ca-bundle\") pod \"ovn-controller-metrics-kdj29\" (UID: \"77e7b435-56ef-4877-9fd1-cfd83b68209e\") " pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.491210 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/77e7b435-56ef-4877-9fd1-cfd83b68209e-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-kdj29\" (UID: \"77e7b435-56ef-4877-9fd1-cfd83b68209e\") " pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.503657 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-b5ck8"] Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.575977 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rgph\" (UniqueName: \"kubernetes.io/projected/77e7b435-56ef-4877-9fd1-cfd83b68209e-kube-api-access-5rgph\") pod \"ovn-controller-metrics-kdj29\" (UID: \"77e7b435-56ef-4877-9fd1-cfd83b68209e\") " pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.585571 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7d17eb49-4204-4589-82ac-c147f1b7b456-scripts\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.585656 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-config\") pod \"dnsmasq-dns-8554648995-b5ck8\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") " pod="openstack/dnsmasq-dns-8554648995-b5ck8" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.585688 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-b5ck8\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") " pod="openstack/dnsmasq-dns-8554648995-b5ck8" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.585722 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vkxg8\" (UniqueName: \"kubernetes.io/projected/7d17eb49-4204-4589-82ac-c147f1b7b456-kube-api-access-vkxg8\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.585756 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d17eb49-4204-4589-82ac-c147f1b7b456-config\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " 
pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.585779 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-dns-svc\") pod \"dnsmasq-dns-8554648995-b5ck8\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") " pod="openstack/dnsmasq-dns-8554648995-b5ck8" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.585805 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d17eb49-4204-4589-82ac-c147f1b7b456-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.585881 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d17eb49-4204-4589-82ac-c147f1b7b456-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.585959 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-b5ck8\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") " pod="openstack/dnsmasq-dns-8554648995-b5ck8" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.585979 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljpg2\" (UniqueName: \"kubernetes.io/projected/151dc631-19a9-439f-89db-c3da47602908-kube-api-access-ljpg2\") pod \"dnsmasq-dns-8554648995-b5ck8\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") " pod="openstack/dnsmasq-dns-8554648995-b5ck8" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.586002 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d17eb49-4204-4589-82ac-c147f1b7b456-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.586052 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7d17eb49-4204-4589-82ac-c147f1b7b456-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.586627 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7d17eb49-4204-4589-82ac-c147f1b7b456-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.588097 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7d17eb49-4204-4589-82ac-c147f1b7b456-scripts\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.594797 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-kdj29" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.595687 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d17eb49-4204-4589-82ac-c147f1b7b456-config\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.607188 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d17eb49-4204-4589-82ac-c147f1b7b456-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.616641 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7d17eb49-4204-4589-82ac-c147f1b7b456-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.616678 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d17eb49-4204-4589-82ac-c147f1b7b456-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.656845 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vkxg8\" (UniqueName: \"kubernetes.io/projected/7d17eb49-4204-4589-82ac-c147f1b7b456-kube-api-access-vkxg8\") pod \"ovn-northd-0\" (UID: \"7d17eb49-4204-4589-82ac-c147f1b7b456\") " pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.687447 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-b5ck8\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") " pod="openstack/dnsmasq-dns-8554648995-b5ck8" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.687499 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljpg2\" (UniqueName: \"kubernetes.io/projected/151dc631-19a9-439f-89db-c3da47602908-kube-api-access-ljpg2\") pod \"dnsmasq-dns-8554648995-b5ck8\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") " pod="openstack/dnsmasq-dns-8554648995-b5ck8" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.687595 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-config\") pod \"dnsmasq-dns-8554648995-b5ck8\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") " pod="openstack/dnsmasq-dns-8554648995-b5ck8" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.687610 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-b5ck8\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") " pod="openstack/dnsmasq-dns-8554648995-b5ck8" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.687651 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" 
(UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-dns-svc\") pod \"dnsmasq-dns-8554648995-b5ck8\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") " pod="openstack/dnsmasq-dns-8554648995-b5ck8" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.688624 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-dns-svc\") pod \"dnsmasq-dns-8554648995-b5ck8\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") " pod="openstack/dnsmasq-dns-8554648995-b5ck8" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.689627 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-b5ck8\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") " pod="openstack/dnsmasq-dns-8554648995-b5ck8" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.690213 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-config\") pod \"dnsmasq-dns-8554648995-b5ck8\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") " pod="openstack/dnsmasq-dns-8554648995-b5ck8" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.690777 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-b5ck8\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") " pod="openstack/dnsmasq-dns-8554648995-b5ck8" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.714031 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljpg2\" (UniqueName: \"kubernetes.io/projected/151dc631-19a9-439f-89db-c3da47602908-kube-api-access-ljpg2\") pod \"dnsmasq-dns-8554648995-b5ck8\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") " pod="openstack/dnsmasq-dns-8554648995-b5ck8" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.800387 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-bn5km" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.821916 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-bn5km" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.892269 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/861103dc-e186-402b-82ae-d8ba926b1cd1-config\") pod \"861103dc-e186-402b-82ae-d8ba926b1cd1\" (UID: \"861103dc-e186-402b-82ae-d8ba926b1cd1\") " Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.892702 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/861103dc-e186-402b-82ae-d8ba926b1cd1-dns-svc\") pod \"861103dc-e186-402b-82ae-d8ba926b1cd1\" (UID: \"861103dc-e186-402b-82ae-d8ba926b1cd1\") " Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.892738 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/861103dc-e186-402b-82ae-d8ba926b1cd1-ovsdbserver-nb\") pod \"861103dc-e186-402b-82ae-d8ba926b1cd1\" (UID: \"861103dc-e186-402b-82ae-d8ba926b1cd1\") " Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.892825 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bznw7\" (UniqueName: \"kubernetes.io/projected/861103dc-e186-402b-82ae-d8ba926b1cd1-kube-api-access-bznw7\") pod \"861103dc-e186-402b-82ae-d8ba926b1cd1\" (UID: \"861103dc-e186-402b-82ae-d8ba926b1cd1\") " Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.893629 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/861103dc-e186-402b-82ae-d8ba926b1cd1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "861103dc-e186-402b-82ae-d8ba926b1cd1" (UID: "861103dc-e186-402b-82ae-d8ba926b1cd1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.893935 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/861103dc-e186-402b-82ae-d8ba926b1cd1-config" (OuterVolumeSpecName: "config") pod "861103dc-e186-402b-82ae-d8ba926b1cd1" (UID: "861103dc-e186-402b-82ae-d8ba926b1cd1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.894703 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/861103dc-e186-402b-82ae-d8ba926b1cd1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "861103dc-e186-402b-82ae-d8ba926b1cd1" (UID: "861103dc-e186-402b-82ae-d8ba926b1cd1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.904202 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/861103dc-e186-402b-82ae-d8ba926b1cd1-kube-api-access-bznw7" (OuterVolumeSpecName: "kube-api-access-bznw7") pod "861103dc-e186-402b-82ae-d8ba926b1cd1" (UID: "861103dc-e186-402b-82ae-d8ba926b1cd1"). InnerVolumeSpecName "kube-api-access-bznw7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.956993 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.973889 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-b5ck8" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.996331 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bznw7\" (UniqueName: \"kubernetes.io/projected/861103dc-e186-402b-82ae-d8ba926b1cd1-kube-api-access-bznw7\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.996375 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/861103dc-e186-402b-82ae-d8ba926b1cd1-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.996390 4967 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/861103dc-e186-402b-82ae-d8ba926b1cd1-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:35 crc kubenswrapper[4967]: I1121 15:54:35.996403 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/861103dc-e186-402b-82ae-d8ba926b1cd1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:36 crc kubenswrapper[4967]: I1121 15:54:36.261196 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-kdj29"] Nov 21 15:54:36 crc kubenswrapper[4967]: I1121 15:54:36.596477 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 21 15:54:36 crc kubenswrapper[4967]: W1121 15:54:36.605333 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7d17eb49_4204_4589_82ac_c147f1b7b456.slice/crio-b208ca686db4f7385018c699c00546c9ffc3fad7be4dd1b4aaf632669c035d21 WatchSource:0}: Error finding container b208ca686db4f7385018c699c00546c9ffc3fad7be4dd1b4aaf632669c035d21: Status 404 returned error can't find the container with id b208ca686db4f7385018c699c00546c9ffc3fad7be4dd1b4aaf632669c035d21 Nov 21 15:54:36 crc kubenswrapper[4967]: I1121 15:54:36.607325 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-b5ck8"] Nov 21 15:54:36 crc kubenswrapper[4967]: W1121 15:54:36.628120 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod151dc631_19a9_439f_89db_c3da47602908.slice/crio-986875d8f59c124cadd092f553b4b071ac9cfe437add5d3b2d0dcbf8d29c8863 WatchSource:0}: Error finding container 986875d8f59c124cadd092f553b4b071ac9cfe437add5d3b2d0dcbf8d29c8863: Status 404 returned error can't find the container with id 986875d8f59c124cadd092f553b4b071ac9cfe437add5d3b2d0dcbf8d29c8863 Nov 21 15:54:36 crc kubenswrapper[4967]: I1121 15:54:36.731195 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 21 15:54:36 crc kubenswrapper[4967]: I1121 15:54:36.821526 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"7d17eb49-4204-4589-82ac-c147f1b7b456","Type":"ContainerStarted","Data":"b208ca686db4f7385018c699c00546c9ffc3fad7be4dd1b4aaf632669c035d21"} Nov 21 15:54:36 crc kubenswrapper[4967]: I1121 15:54:36.824041 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-b5ck8" event={"ID":"151dc631-19a9-439f-89db-c3da47602908","Type":"ContainerStarted","Data":"986875d8f59c124cadd092f553b4b071ac9cfe437add5d3b2d0dcbf8d29c8863"} Nov 21 15:54:36 crc kubenswrapper[4967]: I1121 15:54:36.826945 4967 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 21 15:54:36 crc kubenswrapper[4967]: I1121 15:54:36.826995 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-kdj29" event={"ID":"77e7b435-56ef-4877-9fd1-cfd83b68209e","Type":"ContainerStarted","Data":"a291e9b812ce4694256d845703613089b318e6303a6fe189b93275063e015871"} Nov 21 15:54:36 crc kubenswrapper[4967]: I1121 15:54:36.827022 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-kdj29" event={"ID":"77e7b435-56ef-4877-9fd1-cfd83b68209e","Type":"ContainerStarted","Data":"b8374f9142b4729ada190aad6b92e1ec081fba79c32fa260723e28ba8f6f71cb"} Nov 21 15:54:36 crc kubenswrapper[4967]: I1121 15:54:36.829522 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-bn5km" Nov 21 15:54:36 crc kubenswrapper[4967]: I1121 15:54:36.829536 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"d96c12a3-6ce4-40f6-a655-0881d711f9fa","Type":"ContainerStarted","Data":"80b38e9eacd01d13b36f4476fb0cc8a6b6b79cf32296b5aa4a7ba23654c8c79c"} Nov 21 15:54:36 crc kubenswrapper[4967]: I1121 15:54:36.877032 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-kdj29" podStartSLOduration=1.877008112 podStartE2EDuration="1.877008112s" podCreationTimestamp="2025-11-21 15:54:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:54:36.870849806 +0000 UTC m=+1165.129370824" watchObservedRunningTime="2025-11-21 15:54:36.877008112 +0000 UTC m=+1165.135529130" Nov 21 15:54:36 crc kubenswrapper[4967]: I1121 15:54:36.926713 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-bn5km"] Nov 21 15:54:36 crc kubenswrapper[4967]: I1121 15:54:36.951628 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-bn5km"] Nov 21 15:54:37 crc kubenswrapper[4967]: I1121 15:54:37.844415 4967 generic.go:334] "Generic (PLEG): container finished" podID="151dc631-19a9-439f-89db-c3da47602908" containerID="86bc9453d91dc4d415614a79c71ce3dfff1b48ef3b82352ca392072db9159931" exitCode=0 Nov 21 15:54:37 crc kubenswrapper[4967]: I1121 15:54:37.845640 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-b5ck8" event={"ID":"151dc631-19a9-439f-89db-c3da47602908","Type":"ContainerDied","Data":"86bc9453d91dc4d415614a79c71ce3dfff1b48ef3b82352ca392072db9159931"} Nov 21 15:54:37 crc kubenswrapper[4967]: I1121 15:54:37.874279 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-86c0-account-create-6xrqg"] Nov 21 15:54:37 crc kubenswrapper[4967]: I1121 15:54:37.878304 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-86c0-account-create-6xrqg" Nov 21 15:54:37 crc kubenswrapper[4967]: I1121 15:54:37.884521 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 21 15:54:37 crc kubenswrapper[4967]: I1121 15:54:37.934798 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-86c0-account-create-6xrqg"] Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.003032 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-8fnsz"] Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.019851 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-8fnsz"] Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.020027 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-8fnsz" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.059012 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8zwl\" (UniqueName: \"kubernetes.io/projected/e57707db-bae6-4223-968e-52d7ba80f7f2-kube-api-access-d8zwl\") pod \"keystone-86c0-account-create-6xrqg\" (UID: \"e57707db-bae6-4223-968e-52d7ba80f7f2\") " pod="openstack/keystone-86c0-account-create-6xrqg" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.059160 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e57707db-bae6-4223-968e-52d7ba80f7f2-operator-scripts\") pod \"keystone-86c0-account-create-6xrqg\" (UID: \"e57707db-bae6-4223-968e-52d7ba80f7f2\") " pod="openstack/keystone-86c0-account-create-6xrqg" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.169708 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8zwl\" (UniqueName: \"kubernetes.io/projected/e57707db-bae6-4223-968e-52d7ba80f7f2-kube-api-access-d8zwl\") pod \"keystone-86c0-account-create-6xrqg\" (UID: \"e57707db-bae6-4223-968e-52d7ba80f7f2\") " pod="openstack/keystone-86c0-account-create-6xrqg" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.180942 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fznxb\" (UniqueName: \"kubernetes.io/projected/6fb72003-35bf-4473-be52-303e57f5351d-kube-api-access-fznxb\") pod \"keystone-db-create-8fnsz\" (UID: \"6fb72003-35bf-4473-be52-303e57f5351d\") " pod="openstack/keystone-db-create-8fnsz" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.181396 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6fb72003-35bf-4473-be52-303e57f5351d-operator-scripts\") pod \"keystone-db-create-8fnsz\" (UID: \"6fb72003-35bf-4473-be52-303e57f5351d\") " pod="openstack/keystone-db-create-8fnsz" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.181582 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e57707db-bae6-4223-968e-52d7ba80f7f2-operator-scripts\") pod \"keystone-86c0-account-create-6xrqg\" (UID: \"e57707db-bae6-4223-968e-52d7ba80f7f2\") " pod="openstack/keystone-86c0-account-create-6xrqg" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.170348 4967 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/placement-db-create-hdqhx"] Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.183733 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e57707db-bae6-4223-968e-52d7ba80f7f2-operator-scripts\") pod \"keystone-86c0-account-create-6xrqg\" (UID: \"e57707db-bae6-4223-968e-52d7ba80f7f2\") " pod="openstack/keystone-86c0-account-create-6xrqg" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.189570 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-hdqhx" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.190349 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-hdqhx"] Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.220202 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8zwl\" (UniqueName: \"kubernetes.io/projected/e57707db-bae6-4223-968e-52d7ba80f7f2-kube-api-access-d8zwl\") pod \"keystone-86c0-account-create-6xrqg\" (UID: \"e57707db-bae6-4223-968e-52d7ba80f7f2\") " pod="openstack/keystone-86c0-account-create-6xrqg" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.285372 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fznxb\" (UniqueName: \"kubernetes.io/projected/6fb72003-35bf-4473-be52-303e57f5351d-kube-api-access-fznxb\") pod \"keystone-db-create-8fnsz\" (UID: \"6fb72003-35bf-4473-be52-303e57f5351d\") " pod="openstack/keystone-db-create-8fnsz" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.285439 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6fb72003-35bf-4473-be52-303e57f5351d-operator-scripts\") pod \"keystone-db-create-8fnsz\" (UID: \"6fb72003-35bf-4473-be52-303e57f5351d\") " pod="openstack/keystone-db-create-8fnsz" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.286305 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6fb72003-35bf-4473-be52-303e57f5351d-operator-scripts\") pod \"keystone-db-create-8fnsz\" (UID: \"6fb72003-35bf-4473-be52-303e57f5351d\") " pod="openstack/keystone-db-create-8fnsz" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.297651 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-81db-account-create-8qvgv"] Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.299113 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-81db-account-create-8qvgv" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.305219 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.322298 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-81db-account-create-8qvgv"] Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.323335 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fznxb\" (UniqueName: \"kubernetes.io/projected/6fb72003-35bf-4473-be52-303e57f5351d-kube-api-access-fznxb\") pod \"keystone-db-create-8fnsz\" (UID: \"6fb72003-35bf-4473-be52-303e57f5351d\") " pod="openstack/keystone-db-create-8fnsz" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.390864 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktfzj\" (UniqueName: \"kubernetes.io/projected/8a44abc5-c95d-4bb5-ae69-1b770ee9fe56-kube-api-access-ktfzj\") pod \"placement-81db-account-create-8qvgv\" (UID: \"8a44abc5-c95d-4bb5-ae69-1b770ee9fe56\") " pod="openstack/placement-81db-account-create-8qvgv" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.390916 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a44abc5-c95d-4bb5-ae69-1b770ee9fe56-operator-scripts\") pod \"placement-81db-account-create-8qvgv\" (UID: \"8a44abc5-c95d-4bb5-ae69-1b770ee9fe56\") " pod="openstack/placement-81db-account-create-8qvgv" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.390935 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvsb4\" (UniqueName: \"kubernetes.io/projected/704454f3-1078-44b7-b41f-3da332e4015f-kube-api-access-pvsb4\") pod \"placement-db-create-hdqhx\" (UID: \"704454f3-1078-44b7-b41f-3da332e4015f\") " pod="openstack/placement-db-create-hdqhx" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.390964 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/704454f3-1078-44b7-b41f-3da332e4015f-operator-scripts\") pod \"placement-db-create-hdqhx\" (UID: \"704454f3-1078-44b7-b41f-3da332e4015f\") " pod="openstack/placement-db-create-hdqhx" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.410049 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-86c0-account-create-6xrqg" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.433252 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-8fnsz" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.492713 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktfzj\" (UniqueName: \"kubernetes.io/projected/8a44abc5-c95d-4bb5-ae69-1b770ee9fe56-kube-api-access-ktfzj\") pod \"placement-81db-account-create-8qvgv\" (UID: \"8a44abc5-c95d-4bb5-ae69-1b770ee9fe56\") " pod="openstack/placement-81db-account-create-8qvgv" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.492762 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a44abc5-c95d-4bb5-ae69-1b770ee9fe56-operator-scripts\") pod \"placement-81db-account-create-8qvgv\" (UID: \"8a44abc5-c95d-4bb5-ae69-1b770ee9fe56\") " pod="openstack/placement-81db-account-create-8qvgv" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.492789 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvsb4\" (UniqueName: \"kubernetes.io/projected/704454f3-1078-44b7-b41f-3da332e4015f-kube-api-access-pvsb4\") pod \"placement-db-create-hdqhx\" (UID: \"704454f3-1078-44b7-b41f-3da332e4015f\") " pod="openstack/placement-db-create-hdqhx" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.492817 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/704454f3-1078-44b7-b41f-3da332e4015f-operator-scripts\") pod \"placement-db-create-hdqhx\" (UID: \"704454f3-1078-44b7-b41f-3da332e4015f\") " pod="openstack/placement-db-create-hdqhx" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.493678 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/704454f3-1078-44b7-b41f-3da332e4015f-operator-scripts\") pod \"placement-db-create-hdqhx\" (UID: \"704454f3-1078-44b7-b41f-3da332e4015f\") " pod="openstack/placement-db-create-hdqhx" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.493708 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a44abc5-c95d-4bb5-ae69-1b770ee9fe56-operator-scripts\") pod \"placement-81db-account-create-8qvgv\" (UID: \"8a44abc5-c95d-4bb5-ae69-1b770ee9fe56\") " pod="openstack/placement-81db-account-create-8qvgv" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.513577 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktfzj\" (UniqueName: \"kubernetes.io/projected/8a44abc5-c95d-4bb5-ae69-1b770ee9fe56-kube-api-access-ktfzj\") pod \"placement-81db-account-create-8qvgv\" (UID: \"8a44abc5-c95d-4bb5-ae69-1b770ee9fe56\") " pod="openstack/placement-81db-account-create-8qvgv" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.513755 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvsb4\" (UniqueName: \"kubernetes.io/projected/704454f3-1078-44b7-b41f-3da332e4015f-kube-api-access-pvsb4\") pod \"placement-db-create-hdqhx\" (UID: \"704454f3-1078-44b7-b41f-3da332e4015f\") " pod="openstack/placement-db-create-hdqhx" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.554084 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="861103dc-e186-402b-82ae-d8ba926b1cd1" path="/var/lib/kubelet/pods/861103dc-e186-402b-82ae-d8ba926b1cd1/volumes" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 
15:54:38.591856 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-hdqhx" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.660400 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-81db-account-create-8qvgv" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.860449 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-b5ck8" event={"ID":"151dc631-19a9-439f-89db-c3da47602908","Type":"ContainerStarted","Data":"510e64ce8d8d64f5313f82c3f72ba24e3756dd8f364bbfccef79c6da29a4b062"} Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.862943 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-b5ck8" Nov 21 15:54:38 crc kubenswrapper[4967]: I1121 15:54:38.915671 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-b5ck8" podStartSLOduration=3.915650636 podStartE2EDuration="3.915650636s" podCreationTimestamp="2025-11-21 15:54:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:54:38.901021866 +0000 UTC m=+1167.159542894" watchObservedRunningTime="2025-11-21 15:54:38.915650636 +0000 UTC m=+1167.174171644" Nov 21 15:54:39 crc kubenswrapper[4967]: I1121 15:54:39.350514 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-86c0-account-create-6xrqg"] Nov 21 15:54:39 crc kubenswrapper[4967]: I1121 15:54:39.359563 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-8fnsz"] Nov 21 15:54:39 crc kubenswrapper[4967]: I1121 15:54:39.505146 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-hdqhx"] Nov 21 15:54:39 crc kubenswrapper[4967]: I1121 15:54:39.596720 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-81db-account-create-8qvgv"] Nov 21 15:54:39 crc kubenswrapper[4967]: W1121 15:54:39.618170 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a44abc5_c95d_4bb5_ae69_1b770ee9fe56.slice/crio-afaa63ac5ed7a0a19de950d8c747dc661526d141890468a713eccea572e090b7 WatchSource:0}: Error finding container afaa63ac5ed7a0a19de950d8c747dc661526d141890468a713eccea572e090b7: Status 404 returned error can't find the container with id afaa63ac5ed7a0a19de950d8c747dc661526d141890468a713eccea572e090b7 Nov 21 15:54:39 crc kubenswrapper[4967]: I1121 15:54:39.877516 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-81db-account-create-8qvgv" event={"ID":"8a44abc5-c95d-4bb5-ae69-1b770ee9fe56","Type":"ContainerStarted","Data":"10f259730fb38a71b2cdccd1ef36b24320560377be351357fa3351e6f5db57f9"} Nov 21 15:54:39 crc kubenswrapper[4967]: I1121 15:54:39.877819 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-81db-account-create-8qvgv" event={"ID":"8a44abc5-c95d-4bb5-ae69-1b770ee9fe56","Type":"ContainerStarted","Data":"afaa63ac5ed7a0a19de950d8c747dc661526d141890468a713eccea572e090b7"} Nov 21 15:54:39 crc kubenswrapper[4967]: I1121 15:54:39.882202 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"7d17eb49-4204-4589-82ac-c147f1b7b456","Type":"ContainerStarted","Data":"6222b799ce745899894a1d2cc2ba832b0218c5e0ec86b3af68c817166c3e04f2"} Nov 21 15:54:39 crc 
kubenswrapper[4967]: I1121 15:54:39.883378 4967 generic.go:334] "Generic (PLEG): container finished" podID="6fb72003-35bf-4473-be52-303e57f5351d" containerID="5c15e2cf6c73812b7e054641ec4ffd9378713c9efb432d4ae6d95abfeb7e484b" exitCode=0 Nov 21 15:54:39 crc kubenswrapper[4967]: I1121 15:54:39.883433 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-8fnsz" event={"ID":"6fb72003-35bf-4473-be52-303e57f5351d","Type":"ContainerDied","Data":"5c15e2cf6c73812b7e054641ec4ffd9378713c9efb432d4ae6d95abfeb7e484b"} Nov 21 15:54:39 crc kubenswrapper[4967]: I1121 15:54:39.883449 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-8fnsz" event={"ID":"6fb72003-35bf-4473-be52-303e57f5351d","Type":"ContainerStarted","Data":"ed16a9194d5cd644e03262d8a9efa07ffdb7d8a31dd628a3fd73377593f3e498"} Nov 21 15:54:39 crc kubenswrapper[4967]: I1121 15:54:39.885572 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-86c0-account-create-6xrqg" event={"ID":"e57707db-bae6-4223-968e-52d7ba80f7f2","Type":"ContainerStarted","Data":"132ddaec39109b192e83e49b225bd554f8ce105e338909116db1e2b95912817f"} Nov 21 15:54:39 crc kubenswrapper[4967]: I1121 15:54:39.885602 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-86c0-account-create-6xrqg" event={"ID":"e57707db-bae6-4223-968e-52d7ba80f7f2","Type":"ContainerStarted","Data":"f2c5f6f251ef92d3ad5399f5b0cdd30a00b76da31145c653150db7a461b38c12"} Nov 21 15:54:39 crc kubenswrapper[4967]: I1121 15:54:39.889237 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-hdqhx" event={"ID":"704454f3-1078-44b7-b41f-3da332e4015f","Type":"ContainerStarted","Data":"1abeadf52cc378302c89526798d9d483a176622c177aafef726e98e975471f59"} Nov 21 15:54:39 crc kubenswrapper[4967]: I1121 15:54:39.889272 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-hdqhx" event={"ID":"704454f3-1078-44b7-b41f-3da332e4015f","Type":"ContainerStarted","Data":"99c46ef65136e2a8e250c8adebcd4c1611df3516f0e5d3f35ce35f2cb7346f46"} Nov 21 15:54:39 crc kubenswrapper[4967]: I1121 15:54:39.944492 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-81db-account-create-8qvgv" podStartSLOduration=1.9444655050000001 podStartE2EDuration="1.944465505s" podCreationTimestamp="2025-11-21 15:54:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:54:39.912381675 +0000 UTC m=+1168.170902683" watchObservedRunningTime="2025-11-21 15:54:39.944465505 +0000 UTC m=+1168.202986513" Nov 21 15:54:39 crc kubenswrapper[4967]: I1121 15:54:39.962493 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-86c0-account-create-6xrqg" podStartSLOduration=2.9624713 podStartE2EDuration="2.9624713s" podCreationTimestamp="2025-11-21 15:54:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:54:39.948225952 +0000 UTC m=+1168.206746970" watchObservedRunningTime="2025-11-21 15:54:39.9624713 +0000 UTC m=+1168.220992328" Nov 21 15:54:40 crc kubenswrapper[4967]: I1121 15:54:40.778690 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-kn45m"] Nov 21 15:54:40 crc kubenswrapper[4967]: I1121 15:54:40.780382 4967 util.go:30] "No sandbox for pod can 
Nov 21 15:54:40 crc kubenswrapper[4967]: I1121 15:54:40.876745 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8zwd\" (UniqueName: \"kubernetes.io/projected/5d966c29-248f-49a3-b5c8-e88deb1aa0d9-kube-api-access-k8zwd\") pod \"mysqld-exporter-openstack-db-create-kn45m\" (UID: \"5d966c29-248f-49a3-b5c8-e88deb1aa0d9\") " pod="openstack/mysqld-exporter-openstack-db-create-kn45m"
Nov 21 15:54:40 crc kubenswrapper[4967]: I1121 15:54:40.876823 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5d966c29-248f-49a3-b5c8-e88deb1aa0d9-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-kn45m\" (UID: \"5d966c29-248f-49a3-b5c8-e88deb1aa0d9\") " pod="openstack/mysqld-exporter-openstack-db-create-kn45m"
Nov 21 15:54:40 crc kubenswrapper[4967]: I1121 15:54:40.893523 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-kn45m"]
Nov 21 15:54:40 crc kubenswrapper[4967]: I1121 15:54:40.960062 4967 generic.go:334] "Generic (PLEG): container finished" podID="704454f3-1078-44b7-b41f-3da332e4015f" containerID="1abeadf52cc378302c89526798d9d483a176622c177aafef726e98e975471f59" exitCode=0
Nov 21 15:54:40 crc kubenswrapper[4967]: I1121 15:54:40.960165 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-hdqhx" event={"ID":"704454f3-1078-44b7-b41f-3da332e4015f","Type":"ContainerDied","Data":"1abeadf52cc378302c89526798d9d483a176622c177aafef726e98e975471f59"}
Nov 21 15:54:40 crc kubenswrapper[4967]: I1121 15:54:40.989466 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8zwd\" (UniqueName: \"kubernetes.io/projected/5d966c29-248f-49a3-b5c8-e88deb1aa0d9-kube-api-access-k8zwd\") pod \"mysqld-exporter-openstack-db-create-kn45m\" (UID: \"5d966c29-248f-49a3-b5c8-e88deb1aa0d9\") " pod="openstack/mysqld-exporter-openstack-db-create-kn45m"
Nov 21 15:54:40 crc kubenswrapper[4967]: I1121 15:54:40.989728 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5d966c29-248f-49a3-b5c8-e88deb1aa0d9-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-kn45m\" (UID: \"5d966c29-248f-49a3-b5c8-e88deb1aa0d9\") " pod="openstack/mysqld-exporter-openstack-db-create-kn45m"
Nov 21 15:54:40 crc kubenswrapper[4967]: I1121 15:54:40.990505 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5d966c29-248f-49a3-b5c8-e88deb1aa0d9-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-kn45m\" (UID: \"5d966c29-248f-49a3-b5c8-e88deb1aa0d9\") " pod="openstack/mysqld-exporter-openstack-db-create-kn45m"
Nov 21 15:54:40 crc kubenswrapper[4967]: I1121 15:54:40.996734 4967 generic.go:334] "Generic (PLEG): container finished" podID="8a44abc5-c95d-4bb5-ae69-1b770ee9fe56" containerID="10f259730fb38a71b2cdccd1ef36b24320560377be351357fa3351e6f5db57f9" exitCode=0
Nov 21 15:54:40 crc kubenswrapper[4967]: I1121 15:54:40.996827 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-81db-account-create-8qvgv" event={"ID":"8a44abc5-c95d-4bb5-ae69-1b770ee9fe56","Type":"ContainerDied","Data":"10f259730fb38a71b2cdccd1ef36b24320560377be351357fa3351e6f5db57f9"}
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.034632 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"7d17eb49-4204-4589-82ac-c147f1b7b456","Type":"ContainerStarted","Data":"e4277cdd812705707b5c929b5c9ee595a8258c4cd33bf4197156f62b4a3d62ad"}
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.035957 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0"
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.070236 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8zwd\" (UniqueName: \"kubernetes.io/projected/5d966c29-248f-49a3-b5c8-e88deb1aa0d9-kube-api-access-k8zwd\") pod \"mysqld-exporter-openstack-db-create-kn45m\" (UID: \"5d966c29-248f-49a3-b5c8-e88deb1aa0d9\") " pod="openstack/mysqld-exporter-openstack-db-create-kn45m"
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.085730 4967 generic.go:334] "Generic (PLEG): container finished" podID="e57707db-bae6-4223-968e-52d7ba80f7f2" containerID="132ddaec39109b192e83e49b225bd554f8ce105e338909116db1e2b95912817f" exitCode=0
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.085996 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-86c0-account-create-6xrqg" event={"ID":"e57707db-bae6-4223-968e-52d7ba80f7f2","Type":"ContainerDied","Data":"132ddaec39109b192e83e49b225bd554f8ce105e338909116db1e2b95912817f"}
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.095362 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-50c0-account-create-c5xnl"]
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.096931 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-50c0-account-create-c5xnl"
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.105023 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-db-secret"
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.110156 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-kn45m"
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.136597 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-50c0-account-create-c5xnl"]
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.182382 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-b5ck8"]
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.182658 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-b5ck8" podUID="151dc631-19a9-439f-89db-c3da47602908" containerName="dnsmasq-dns" containerID="cri-o://510e64ce8d8d64f5313f82c3f72ba24e3756dd8f364bbfccef79c6da29a4b062" gracePeriod=10
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.201513 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=4.038039213 podStartE2EDuration="6.201491794s" podCreationTimestamp="2025-11-21 15:54:35 +0000 UTC" firstStartedPulling="2025-11-21 15:54:36.609664189 +0000 UTC m=+1164.868185197" lastFinishedPulling="2025-11-21 15:54:38.77311677 +0000 UTC m=+1167.031637778" observedRunningTime="2025-11-21 15:54:41.136274054 +0000 UTC m=+1169.394795062" watchObservedRunningTime="2025-11-21 15:54:41.201491794 +0000 UTC m=+1169.460012812"
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.216216 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-rdtt2"]
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.251689 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2"
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.303113 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e949c27-e236-4411-b306-eb9e7d3385f4-operator-scripts\") pod \"mysqld-exporter-50c0-account-create-c5xnl\" (UID: \"6e949c27-e236-4411-b306-eb9e7d3385f4\") " pod="openstack/mysqld-exporter-50c0-account-create-c5xnl"
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.303276 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6rt7\" (UniqueName: \"kubernetes.io/projected/6e949c27-e236-4411-b306-eb9e7d3385f4-kube-api-access-t6rt7\") pod \"mysqld-exporter-50c0-account-create-c5xnl\" (UID: \"6e949c27-e236-4411-b306-eb9e7d3385f4\") " pod="openstack/mysqld-exporter-50c0-account-create-c5xnl"
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.358398 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-rdtt2"]
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.406687 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e949c27-e236-4411-b306-eb9e7d3385f4-operator-scripts\") pod \"mysqld-exporter-50c0-account-create-c5xnl\" (UID: \"6e949c27-e236-4411-b306-eb9e7d3385f4\") " pod="openstack/mysqld-exporter-50c0-account-create-c5xnl"
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.406738 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-rdtt2\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2"
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.406796 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4svb\" (UniqueName: \"kubernetes.io/projected/3678c17b-6120-4063-b84d-7cb362b46b62-kube-api-access-s4svb\") pod \"dnsmasq-dns-b8fbc5445-rdtt2\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2"
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.406872 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-rdtt2\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2"
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.407029 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6rt7\" (UniqueName: \"kubernetes.io/projected/6e949c27-e236-4411-b306-eb9e7d3385f4-kube-api-access-t6rt7\") pod \"mysqld-exporter-50c0-account-create-c5xnl\" (UID: \"6e949c27-e236-4411-b306-eb9e7d3385f4\") " pod="openstack/mysqld-exporter-50c0-account-create-c5xnl"
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.407106 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-rdtt2\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2"
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.407200 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-config\") pod \"dnsmasq-dns-b8fbc5445-rdtt2\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2"
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.408675 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e949c27-e236-4411-b306-eb9e7d3385f4-operator-scripts\") pod \"mysqld-exporter-50c0-account-create-c5xnl\" (UID: \"6e949c27-e236-4411-b306-eb9e7d3385f4\") " pod="openstack/mysqld-exporter-50c0-account-create-c5xnl"
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.436196 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6rt7\" (UniqueName: \"kubernetes.io/projected/6e949c27-e236-4411-b306-eb9e7d3385f4-kube-api-access-t6rt7\") pod \"mysqld-exporter-50c0-account-create-c5xnl\" (UID: \"6e949c27-e236-4411-b306-eb9e7d3385f4\") " pod="openstack/mysqld-exporter-50c0-account-create-c5xnl"
Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.468114 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-50c0-account-create-c5xnl"
Need to start a new one" pod="openstack/mysqld-exporter-50c0-account-create-c5xnl" Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.509673 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-config\") pod \"dnsmasq-dns-b8fbc5445-rdtt2\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.509731 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-rdtt2\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.509762 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4svb\" (UniqueName: \"kubernetes.io/projected/3678c17b-6120-4063-b84d-7cb362b46b62-kube-api-access-s4svb\") pod \"dnsmasq-dns-b8fbc5445-rdtt2\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.509806 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-rdtt2\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.509901 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-rdtt2\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.510944 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-rdtt2\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.510996 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-rdtt2\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.511094 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-config\") pod \"dnsmasq-dns-b8fbc5445-rdtt2\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.511119 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-rdtt2\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.536725 
4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4svb\" (UniqueName: \"kubernetes.io/projected/3678c17b-6120-4063-b84d-7cb362b46b62-kube-api-access-s4svb\") pod \"dnsmasq-dns-b8fbc5445-rdtt2\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" Nov 21 15:54:41 crc kubenswrapper[4967]: I1121 15:54:41.585488 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.097443 4967 generic.go:334] "Generic (PLEG): container finished" podID="151dc631-19a9-439f-89db-c3da47602908" containerID="510e64ce8d8d64f5313f82c3f72ba24e3756dd8f364bbfccef79c6da29a4b062" exitCode=0 Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.097543 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-b5ck8" event={"ID":"151dc631-19a9-439f-89db-c3da47602908","Type":"ContainerDied","Data":"510e64ce8d8d64f5313f82c3f72ba24e3756dd8f364bbfccef79c6da29a4b062"} Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.324664 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.334627 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.338488 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.338757 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-jpfkz" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.338986 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.339113 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.351817 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.409920 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-ddpl5"] Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.411272 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-ddpl5" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.414961 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.415695 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.415899 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.441289 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-cache\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.441400 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.441435 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rvs4\" (UniqueName: \"kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-kube-api-access-8rvs4\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.441492 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-lock\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.441522 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.450092 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-ddpl5"] Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.489114 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-ddpl5"] Nov 21 15:54:42 crc kubenswrapper[4967]: E1121 15:54:42.490258 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-52mft ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/swift-ring-rebalance-ddpl5" podUID="a5b78f26-c6ca-46b3-bc31-04d062e7c9b0" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.511139 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-bf2mc"] Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.513059 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-bf2mc" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.543809 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-combined-ca-bundle\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.544416 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-etc-swift\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.544477 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-swiftconf\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.544509 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-cache\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.544548 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-scripts\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.544587 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-ring-data-devices\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.544658 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.544723 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rvs4\" (UniqueName: \"kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-kube-api-access-8rvs4\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.544774 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-dispersionconf\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5" Nov 21 15:54:42 
crc kubenswrapper[4967]: E1121 15:54:42.544827 4967 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 21 15:54:42 crc kubenswrapper[4967]: E1121 15:54:42.544859 4967 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 21 15:54:42 crc kubenswrapper[4967]: E1121 15:54:42.544918 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift podName:4bf5cb6a-c8f8-43c3-b546-282bfd3244e2 nodeName:}" failed. No retries permitted until 2025-11-21 15:54:43.04489402 +0000 UTC m=+1171.303415078 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift") pod "swift-storage-0" (UID: "4bf5cb6a-c8f8-43c3-b546-282bfd3244e2") : configmap "swift-ring-files" not found Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.544846 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-lock\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.545027 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-cache\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.545059 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.545149 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52mft\" (UniqueName: \"kubernetes.io/projected/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-kube-api-access-52mft\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.545468 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/swift-storage-0" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.545649 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-lock\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.559851 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-bf2mc"] Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.564215 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rvs4\" (UniqueName: 
\"kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-kube-api-access-8rvs4\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.620854 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.652247 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b1974654-371e-49f8-b8d3-701e31f82b54-scripts\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.652399 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-combined-ca-bundle\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.652479 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-etc-swift\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.652618 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-swiftconf\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.654462 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-etc-swift\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.654968 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-scripts\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.655036 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-ring-data-devices\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.655186 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8m9r\" (UniqueName: \"kubernetes.io/projected/b1974654-371e-49f8-b8d3-701e31f82b54-kube-api-access-d8m9r\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") 
" pod="openstack/swift-ring-rebalance-bf2mc" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.655224 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1974654-371e-49f8-b8d3-701e31f82b54-combined-ca-bundle\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.655264 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b1974654-371e-49f8-b8d3-701e31f82b54-ring-data-devices\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.655365 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-dispersionconf\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.655427 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b1974654-371e-49f8-b8d3-701e31f82b54-etc-swift\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.655470 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b1974654-371e-49f8-b8d3-701e31f82b54-dispersionconf\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.655579 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52mft\" (UniqueName: \"kubernetes.io/projected/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-kube-api-access-52mft\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.655972 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b1974654-371e-49f8-b8d3-701e31f82b54-swiftconf\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.657085 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-scripts\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5" Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.657960 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-ring-data-devices\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " 
Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.662043 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-dispersionconf\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5"
Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.666737 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-combined-ca-bundle\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5"
Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.668547 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-swiftconf\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5"
Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.674595 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52mft\" (UniqueName: \"kubernetes.io/projected/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-kube-api-access-52mft\") pod \"swift-ring-rebalance-ddpl5\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") " pod="openstack/swift-ring-rebalance-ddpl5"
Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.758732 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8m9r\" (UniqueName: \"kubernetes.io/projected/b1974654-371e-49f8-b8d3-701e31f82b54-kube-api-access-d8m9r\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc"
Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.758785 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1974654-371e-49f8-b8d3-701e31f82b54-combined-ca-bundle\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc"
Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.758827 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b1974654-371e-49f8-b8d3-701e31f82b54-ring-data-devices\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc"
Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.758884 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b1974654-371e-49f8-b8d3-701e31f82b54-etc-swift\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc"
Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.758924 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b1974654-371e-49f8-b8d3-701e31f82b54-dispersionconf\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc"
Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.759011 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b1974654-371e-49f8-b8d3-701e31f82b54-swiftconf\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc"
Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.759062 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b1974654-371e-49f8-b8d3-701e31f82b54-scripts\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc"
Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.760502 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b1974654-371e-49f8-b8d3-701e31f82b54-scripts\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc"
Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.760965 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b1974654-371e-49f8-b8d3-701e31f82b54-etc-swift\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc"
Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.761018 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b1974654-371e-49f8-b8d3-701e31f82b54-ring-data-devices\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc"
Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.767980 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b1974654-371e-49f8-b8d3-701e31f82b54-dispersionconf\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc"
Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.767981 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b1974654-371e-49f8-b8d3-701e31f82b54-swiftconf\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc"
Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.771837 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1974654-371e-49f8-b8d3-701e31f82b54-combined-ca-bundle\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc"
Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.778803 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8m9r\" (UniqueName: \"kubernetes.io/projected/b1974654-371e-49f8-b8d3-701e31f82b54-kube-api-access-d8m9r\") pod \"swift-ring-rebalance-bf2mc\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " pod="openstack/swift-ring-rebalance-bf2mc"
Nov 21 15:54:42 crc kubenswrapper[4967]: I1121 15:54:42.863844 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-bf2mc"
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.068740 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0"
Nov 21 15:54:43 crc kubenswrapper[4967]: E1121 15:54:43.068933 4967 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 21 15:54:43 crc kubenswrapper[4967]: E1121 15:54:43.068965 4967 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 21 15:54:43 crc kubenswrapper[4967]: E1121 15:54:43.069027 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift podName:4bf5cb6a-c8f8-43c3-b546-282bfd3244e2 nodeName:}" failed. No retries permitted until 2025-11-21 15:54:44.069008532 +0000 UTC m=+1172.327529540 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift") pod "swift-storage-0" (UID: "4bf5cb6a-c8f8-43c3-b546-282bfd3244e2") : configmap "swift-ring-files" not found
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.106707 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-ddpl5"
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.118749 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-ddpl5"
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.274260 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-ring-data-devices\") pod \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") "
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.274389 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-swiftconf\") pod \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") "
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.274411 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-scripts\") pod \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") "
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.274480 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-dispersionconf\") pod \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") "
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.274555 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-52mft\" (UniqueName: \"kubernetes.io/projected/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-kube-api-access-52mft\") pod \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") "
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.274598 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-combined-ca-bundle\") pod \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") "
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.274801 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-etc-swift\") pod \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\" (UID: \"a5b78f26-c6ca-46b3-bc31-04d062e7c9b0\") "
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.276141 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "a5b78f26-c6ca-46b3-bc31-04d062e7c9b0" (UID: "a5b78f26-c6ca-46b3-bc31-04d062e7c9b0"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.276585 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "a5b78f26-c6ca-46b3-bc31-04d062e7c9b0" (UID: "a5b78f26-c6ca-46b3-bc31-04d062e7c9b0"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.279412 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "a5b78f26-c6ca-46b3-bc31-04d062e7c9b0" (UID: "a5b78f26-c6ca-46b3-bc31-04d062e7c9b0"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.279519 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "a5b78f26-c6ca-46b3-bc31-04d062e7c9b0" (UID: "a5b78f26-c6ca-46b3-bc31-04d062e7c9b0"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.279869 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-scripts" (OuterVolumeSpecName: "scripts") pod "a5b78f26-c6ca-46b3-bc31-04d062e7c9b0" (UID: "a5b78f26-c6ca-46b3-bc31-04d062e7c9b0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.281831 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-kube-api-access-52mft" (OuterVolumeSpecName: "kube-api-access-52mft") pod "a5b78f26-c6ca-46b3-bc31-04d062e7c9b0" (UID: "a5b78f26-c6ca-46b3-bc31-04d062e7c9b0"). InnerVolumeSpecName "kube-api-access-52mft". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.283085 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a5b78f26-c6ca-46b3-bc31-04d062e7c9b0" (UID: "a5b78f26-c6ca-46b3-bc31-04d062e7c9b0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.377610 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-52mft\" (UniqueName: \"kubernetes.io/projected/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-kube-api-access-52mft\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.377652 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.377660 4967 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-etc-swift\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.377670 4967 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-ring-data-devices\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.377679 4967 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-swiftconf\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.377687 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-scripts\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.377694 4967 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0-dispersionconf\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.525288 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-xpqkr"]
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.528582 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-xpqkr"
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.532901 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-xpqkr"]
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.640678 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-d1ef-account-create-bpt7z"]
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.642387 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-d1ef-account-create-bpt7z"
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.645179 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret"
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.655987 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-d1ef-account-create-bpt7z"]
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.687446 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dntr\" (UniqueName: \"kubernetes.io/projected/88694819-ea7a-48ee-89fb-c9df36ca33d9-kube-api-access-9dntr\") pod \"glance-db-create-xpqkr\" (UID: \"88694819-ea7a-48ee-89fb-c9df36ca33d9\") " pod="openstack/glance-db-create-xpqkr"
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.687508 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88694819-ea7a-48ee-89fb-c9df36ca33d9-operator-scripts\") pod \"glance-db-create-xpqkr\" (UID: \"88694819-ea7a-48ee-89fb-c9df36ca33d9\") " pod="openstack/glance-db-create-xpqkr"
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.789293 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74mtj\" (UniqueName: \"kubernetes.io/projected/22c59946-4e7a-45d0-8cd3-b7de14d8e8f5-kube-api-access-74mtj\") pod \"glance-d1ef-account-create-bpt7z\" (UID: \"22c59946-4e7a-45d0-8cd3-b7de14d8e8f5\") " pod="openstack/glance-d1ef-account-create-bpt7z"
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.789512 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22c59946-4e7a-45d0-8cd3-b7de14d8e8f5-operator-scripts\") pod \"glance-d1ef-account-create-bpt7z\" (UID: \"22c59946-4e7a-45d0-8cd3-b7de14d8e8f5\") " pod="openstack/glance-d1ef-account-create-bpt7z"
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.789629 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dntr\" (UniqueName: \"kubernetes.io/projected/88694819-ea7a-48ee-89fb-c9df36ca33d9-kube-api-access-9dntr\") pod \"glance-db-create-xpqkr\" (UID: \"88694819-ea7a-48ee-89fb-c9df36ca33d9\") " pod="openstack/glance-db-create-xpqkr"
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.789674 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88694819-ea7a-48ee-89fb-c9df36ca33d9-operator-scripts\") pod \"glance-db-create-xpqkr\" (UID: \"88694819-ea7a-48ee-89fb-c9df36ca33d9\") " pod="openstack/glance-db-create-xpqkr"
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.790935 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88694819-ea7a-48ee-89fb-c9df36ca33d9-operator-scripts\") pod \"glance-db-create-xpqkr\" (UID: \"88694819-ea7a-48ee-89fb-c9df36ca33d9\") " pod="openstack/glance-db-create-xpqkr"
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.810217 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dntr\" (UniqueName: \"kubernetes.io/projected/88694819-ea7a-48ee-89fb-c9df36ca33d9-kube-api-access-9dntr\") pod \"glance-db-create-xpqkr\" (UID: \"88694819-ea7a-48ee-89fb-c9df36ca33d9\") " pod="openstack/glance-db-create-xpqkr"
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.850930 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-xpqkr"
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.892590 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74mtj\" (UniqueName: \"kubernetes.io/projected/22c59946-4e7a-45d0-8cd3-b7de14d8e8f5-kube-api-access-74mtj\") pod \"glance-d1ef-account-create-bpt7z\" (UID: \"22c59946-4e7a-45d0-8cd3-b7de14d8e8f5\") " pod="openstack/glance-d1ef-account-create-bpt7z"
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.892680 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22c59946-4e7a-45d0-8cd3-b7de14d8e8f5-operator-scripts\") pod \"glance-d1ef-account-create-bpt7z\" (UID: \"22c59946-4e7a-45d0-8cd3-b7de14d8e8f5\") " pod="openstack/glance-d1ef-account-create-bpt7z"
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.893455 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22c59946-4e7a-45d0-8cd3-b7de14d8e8f5-operator-scripts\") pod \"glance-d1ef-account-create-bpt7z\" (UID: \"22c59946-4e7a-45d0-8cd3-b7de14d8e8f5\") " pod="openstack/glance-d1ef-account-create-bpt7z"
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.911876 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74mtj\" (UniqueName: \"kubernetes.io/projected/22c59946-4e7a-45d0-8cd3-b7de14d8e8f5-kube-api-access-74mtj\") pod \"glance-d1ef-account-create-bpt7z\" (UID: \"22c59946-4e7a-45d0-8cd3-b7de14d8e8f5\") " pod="openstack/glance-d1ef-account-create-bpt7z"
Nov 21 15:54:43 crc kubenswrapper[4967]: I1121 15:54:43.959266 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-d1ef-account-create-bpt7z"
Nov 21 15:54:44 crc kubenswrapper[4967]: I1121 15:54:44.096376 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0"
Nov 21 15:54:44 crc kubenswrapper[4967]: E1121 15:54:44.096534 4967 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 21 15:54:44 crc kubenswrapper[4967]: E1121 15:54:44.096553 4967 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 21 15:54:44 crc kubenswrapper[4967]: E1121 15:54:44.096598 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift podName:4bf5cb6a-c8f8-43c3-b546-282bfd3244e2 nodeName:}" failed. No retries permitted until 2025-11-21 15:54:46.096584325 +0000 UTC m=+1174.355105333 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift") pod "swift-storage-0" (UID: "4bf5cb6a-c8f8-43c3-b546-282bfd3244e2") : configmap "swift-ring-files" not found
Nov 21 15:54:44 crc kubenswrapper[4967]: I1121 15:54:44.115242 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-ddpl5"
Nov 21 15:54:44 crc kubenswrapper[4967]: I1121 15:54:44.181038 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-ddpl5"]
Nov 21 15:54:44 crc kubenswrapper[4967]: I1121 15:54:44.187632 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-ddpl5"]
Nov 21 15:54:44 crc kubenswrapper[4967]: I1121 15:54:44.549293 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5b78f26-c6ca-46b3-bc31-04d062e7c9b0" path="/var/lib/kubelet/pods/a5b78f26-c6ca-46b3-bc31-04d062e7c9b0/volumes"
Nov 21 15:54:45 crc kubenswrapper[4967]: I1121 15:54:45.865987 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-8fnsz"
Nov 21 15:54:45 crc kubenswrapper[4967]: I1121 15:54:45.878117 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-86c0-account-create-6xrqg"
Nov 21 15:54:45 crc kubenswrapper[4967]: I1121 15:54:45.894000 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-hdqhx"
Nov 21 15:54:45 crc kubenswrapper[4967]: I1121 15:54:45.895810 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-81db-account-create-8qvgv"
Nov 21 15:54:45 crc kubenswrapper[4967]: I1121 15:54:45.932121 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e57707db-bae6-4223-968e-52d7ba80f7f2-operator-scripts\") pod \"e57707db-bae6-4223-968e-52d7ba80f7f2\" (UID: \"e57707db-bae6-4223-968e-52d7ba80f7f2\") "
Nov 21 15:54:45 crc kubenswrapper[4967]: I1121 15:54:45.932362 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fznxb\" (UniqueName: \"kubernetes.io/projected/6fb72003-35bf-4473-be52-303e57f5351d-kube-api-access-fznxb\") pod \"6fb72003-35bf-4473-be52-303e57f5351d\" (UID: \"6fb72003-35bf-4473-be52-303e57f5351d\") "
Nov 21 15:54:45 crc kubenswrapper[4967]: I1121 15:54:45.932802 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d8zwl\" (UniqueName: \"kubernetes.io/projected/e57707db-bae6-4223-968e-52d7ba80f7f2-kube-api-access-d8zwl\") pod \"e57707db-bae6-4223-968e-52d7ba80f7f2\" (UID: \"e57707db-bae6-4223-968e-52d7ba80f7f2\") "
Nov 21 15:54:45 crc kubenswrapper[4967]: I1121 15:54:45.932864 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6fb72003-35bf-4473-be52-303e57f5351d-operator-scripts\") pod \"6fb72003-35bf-4473-be52-303e57f5351d\" (UID: \"6fb72003-35bf-4473-be52-303e57f5351d\") "
Nov 21 15:54:45 crc kubenswrapper[4967]: I1121 15:54:45.934245 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6fb72003-35bf-4473-be52-303e57f5351d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6fb72003-35bf-4473-be52-303e57f5351d" (UID: "6fb72003-35bf-4473-be52-303e57f5351d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:54:45 crc kubenswrapper[4967]: I1121 15:54:45.934706 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e57707db-bae6-4223-968e-52d7ba80f7f2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e57707db-bae6-4223-968e-52d7ba80f7f2" (UID: "e57707db-bae6-4223-968e-52d7ba80f7f2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:54:45 crc kubenswrapper[4967]: I1121 15:54:45.973664 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fb72003-35bf-4473-be52-303e57f5351d-kube-api-access-fznxb" (OuterVolumeSpecName: "kube-api-access-fznxb") pod "6fb72003-35bf-4473-be52-303e57f5351d" (UID: "6fb72003-35bf-4473-be52-303e57f5351d"). InnerVolumeSpecName "kube-api-access-fznxb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:54:45 crc kubenswrapper[4967]: I1121 15:54:45.988490 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e57707db-bae6-4223-968e-52d7ba80f7f2-kube-api-access-d8zwl" (OuterVolumeSpecName: "kube-api-access-d8zwl") pod "e57707db-bae6-4223-968e-52d7ba80f7f2" (UID: "e57707db-bae6-4223-968e-52d7ba80f7f2"). InnerVolumeSpecName "kube-api-access-d8zwl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.035222 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ktfzj\" (UniqueName: \"kubernetes.io/projected/8a44abc5-c95d-4bb5-ae69-1b770ee9fe56-kube-api-access-ktfzj\") pod \"8a44abc5-c95d-4bb5-ae69-1b770ee9fe56\" (UID: \"8a44abc5-c95d-4bb5-ae69-1b770ee9fe56\") "
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.035285 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pvsb4\" (UniqueName: \"kubernetes.io/projected/704454f3-1078-44b7-b41f-3da332e4015f-kube-api-access-pvsb4\") pod \"704454f3-1078-44b7-b41f-3da332e4015f\" (UID: \"704454f3-1078-44b7-b41f-3da332e4015f\") "
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.035347 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/704454f3-1078-44b7-b41f-3da332e4015f-operator-scripts\") pod \"704454f3-1078-44b7-b41f-3da332e4015f\" (UID: \"704454f3-1078-44b7-b41f-3da332e4015f\") "
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.035418 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a44abc5-c95d-4bb5-ae69-1b770ee9fe56-operator-scripts\") pod \"8a44abc5-c95d-4bb5-ae69-1b770ee9fe56\" (UID: \"8a44abc5-c95d-4bb5-ae69-1b770ee9fe56\") "
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.035931 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fznxb\" (UniqueName: \"kubernetes.io/projected/6fb72003-35bf-4473-be52-303e57f5351d-kube-api-access-fznxb\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.035962 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d8zwl\" (UniqueName: \"kubernetes.io/projected/e57707db-bae6-4223-968e-52d7ba80f7f2-kube-api-access-d8zwl\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.035974 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6fb72003-35bf-4473-be52-303e57f5351d-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.035983 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e57707db-bae6-4223-968e-52d7ba80f7f2-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.036365 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a44abc5-c95d-4bb5-ae69-1b770ee9fe56-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8a44abc5-c95d-4bb5-ae69-1b770ee9fe56" (UID: "8a44abc5-c95d-4bb5-ae69-1b770ee9fe56"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.037853 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/704454f3-1078-44b7-b41f-3da332e4015f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "704454f3-1078-44b7-b41f-3da332e4015f" (UID: "704454f3-1078-44b7-b41f-3da332e4015f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.040650 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/704454f3-1078-44b7-b41f-3da332e4015f-kube-api-access-pvsb4" (OuterVolumeSpecName: "kube-api-access-pvsb4") pod "704454f3-1078-44b7-b41f-3da332e4015f" (UID: "704454f3-1078-44b7-b41f-3da332e4015f"). InnerVolumeSpecName "kube-api-access-pvsb4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.056103 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a44abc5-c95d-4bb5-ae69-1b770ee9fe56-kube-api-access-ktfzj" (OuterVolumeSpecName: "kube-api-access-ktfzj") pod "8a44abc5-c95d-4bb5-ae69-1b770ee9fe56" (UID: "8a44abc5-c95d-4bb5-ae69-1b770ee9fe56"). InnerVolumeSpecName "kube-api-access-ktfzj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.086947 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-b5ck8"
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.137740 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-81db-account-create-8qvgv" event={"ID":"8a44abc5-c95d-4bb5-ae69-1b770ee9fe56","Type":"ContainerDied","Data":"afaa63ac5ed7a0a19de950d8c747dc661526d141890468a713eccea572e090b7"}
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.137989 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-ovsdbserver-nb\") pod \"151dc631-19a9-439f-89db-c3da47602908\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") "
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.138085 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-81db-account-create-8qvgv"
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.138092 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-dns-svc\") pod \"151dc631-19a9-439f-89db-c3da47602908\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") "
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.138112 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ljpg2\" (UniqueName: \"kubernetes.io/projected/151dc631-19a9-439f-89db-c3da47602908-kube-api-access-ljpg2\") pod \"151dc631-19a9-439f-89db-c3da47602908\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") "
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.138004 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="afaa63ac5ed7a0a19de950d8c747dc661526d141890468a713eccea572e090b7"
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.141135 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0"
Nov 21 15:54:46 crc kubenswrapper[4967]: E1121 15:54:46.141295 4967 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 21 15:54:46 crc kubenswrapper[4967]: E1121 15:54:46.141329 4967 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 21 15:54:46 crc kubenswrapper[4967]: E1121 15:54:46.141399 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift podName:4bf5cb6a-c8f8-43c3-b546-282bfd3244e2 nodeName:}" failed. No retries permitted until 2025-11-21 15:54:50.141381445 +0000 UTC m=+1178.399902453 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift") pod "swift-storage-0" (UID: "4bf5cb6a-c8f8-43c3-b546-282bfd3244e2") : configmap "swift-ring-files" not found
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.142871 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a44abc5-c95d-4bb5-ae69-1b770ee9fe56-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.142922 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ktfzj\" (UniqueName: \"kubernetes.io/projected/8a44abc5-c95d-4bb5-ae69-1b770ee9fe56-kube-api-access-ktfzj\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.142935 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pvsb4\" (UniqueName: \"kubernetes.io/projected/704454f3-1078-44b7-b41f-3da332e4015f-kube-api-access-pvsb4\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.142945 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/704454f3-1078-44b7-b41f-3da332e4015f-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.143291 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-8fnsz" event={"ID":"6fb72003-35bf-4473-be52-303e57f5351d","Type":"ContainerDied","Data":"ed16a9194d5cd644e03262d8a9efa07ffdb7d8a31dd628a3fd73377593f3e498"}
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.143351 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ed16a9194d5cd644e03262d8a9efa07ffdb7d8a31dd628a3fd73377593f3e498"
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.143428 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-8fnsz"
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.146093 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-86c0-account-create-6xrqg" event={"ID":"e57707db-bae6-4223-968e-52d7ba80f7f2","Type":"ContainerDied","Data":"f2c5f6f251ef92d3ad5399f5b0cdd30a00b76da31145c653150db7a461b38c12"}
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.146158 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f2c5f6f251ef92d3ad5399f5b0cdd30a00b76da31145c653150db7a461b38c12"
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.146378 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-86c0-account-create-6xrqg"
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.149465 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/151dc631-19a9-439f-89db-c3da47602908-kube-api-access-ljpg2" (OuterVolumeSpecName: "kube-api-access-ljpg2") pod "151dc631-19a9-439f-89db-c3da47602908" (UID: "151dc631-19a9-439f-89db-c3da47602908"). InnerVolumeSpecName "kube-api-access-ljpg2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.155280 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"d44bd64f-3c97-4cd8-be5e-2cabe45480a0","Type":"ContainerStarted","Data":"3e971a9fb150a0db0857418622c0514aa345a0c015fa52a0c77ac69a937c3d2a"}
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.159389 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-hdqhx" event={"ID":"704454f3-1078-44b7-b41f-3da332e4015f","Type":"ContainerDied","Data":"99c46ef65136e2a8e250c8adebcd4c1611df3516f0e5d3f35ce35f2cb7346f46"}
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.159421 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="99c46ef65136e2a8e250c8adebcd4c1611df3516f0e5d3f35ce35f2cb7346f46"
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.159490 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-hdqhx"
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.163342 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-b5ck8" event={"ID":"151dc631-19a9-439f-89db-c3da47602908","Type":"ContainerDied","Data":"986875d8f59c124cadd092f553b4b071ac9cfe437add5d3b2d0dcbf8d29c8863"}
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.163387 4967 scope.go:117] "RemoveContainer" containerID="510e64ce8d8d64f5313f82c3f72ba24e3756dd8f364bbfccef79c6da29a4b062"
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.163445 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-b5ck8"
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.193543 4967 scope.go:117] "RemoveContainer" containerID="86bc9453d91dc4d415614a79c71ce3dfff1b48ef3b82352ca392072db9159931"
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.206864 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "151dc631-19a9-439f-89db-c3da47602908" (UID: "151dc631-19a9-439f-89db-c3da47602908"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.233111 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "151dc631-19a9-439f-89db-c3da47602908" (UID: "151dc631-19a9-439f-89db-c3da47602908"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.244075 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-ovsdbserver-sb\") pod \"151dc631-19a9-439f-89db-c3da47602908\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") "
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.244180 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-config\") pod \"151dc631-19a9-439f-89db-c3da47602908\" (UID: \"151dc631-19a9-439f-89db-c3da47602908\") "
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.244782 4967 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.244807 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ljpg2\" (UniqueName: \"kubernetes.io/projected/151dc631-19a9-439f-89db-c3da47602908-kube-api-access-ljpg2\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.244819 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.290578 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "151dc631-19a9-439f-89db-c3da47602908" (UID: "151dc631-19a9-439f-89db-c3da47602908"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.291103 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-config" (OuterVolumeSpecName: "config") pod "151dc631-19a9-439f-89db-c3da47602908" (UID: "151dc631-19a9-439f-89db-c3da47602908"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.347077 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.347116 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/151dc631-19a9-439f-89db-c3da47602908-config\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.419613 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-50c0-account-create-c5xnl"]
Nov 21 15:54:46 crc kubenswrapper[4967]: W1121 15:54:46.420990 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6e949c27_e236_4411_b306_eb9e7d3385f4.slice/crio-8f05f6f4feca090f9d3be6aa8c0011841ae0fda526dec59ad8cbf1ae7630dd5d WatchSource:0}: Error finding container 8f05f6f4feca090f9d3be6aa8c0011841ae0fda526dec59ad8cbf1ae7630dd5d: Status 404 returned error can't find the container with id 8f05f6f4feca090f9d3be6aa8c0011841ae0fda526dec59ad8cbf1ae7630dd5d
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.429078 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-rdtt2"]
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.438245 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-kn45m"]
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.506974 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-b5ck8"]
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.514681 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-b5ck8"]
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.521766 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.521892 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.549904 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="151dc631-19a9-439f-89db-c3da47602908" path="/var/lib/kubelet/pods/151dc631-19a9-439f-89db-c3da47602908/volumes"
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.638295 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-bf2mc"]
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.647499 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-xpqkr"]
Nov 21 15:54:46 crc kubenswrapper[4967]: I1121 15:54:46.656738 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-d1ef-account-create-bpt7z"]
Nov 21 15:54:46 crc kubenswrapper[4967]: W1121 15:54:46.723782 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88694819_ea7a_48ee_89fb_c9df36ca33d9.slice/crio-48ec1d7020eb76484122ad785bf12fd7ea8b5cc23d39de7bf2c9d7a3ef1b58d0 WatchSource:0}: Error finding container 48ec1d7020eb76484122ad785bf12fd7ea8b5cc23d39de7bf2c9d7a3ef1b58d0: Status 404 returned error can't find the container with id 48ec1d7020eb76484122ad785bf12fd7ea8b5cc23d39de7bf2c9d7a3ef1b58d0
Nov 21 15:54:46 crc kubenswrapper[4967]: W1121 15:54:46.724084 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod22c59946_4e7a_45d0_8cd3_b7de14d8e8f5.slice/crio-5f7df08c74c923559ac1f49498447eca8ad30207a28a2e90a1f7e0410d520648 WatchSource:0}: Error finding container 5f7df08c74c923559ac1f49498447eca8ad30207a28a2e90a1f7e0410d520648: Status 404 returned error can't find the container with id 5f7df08c74c923559ac1f49498447eca8ad30207a28a2e90a1f7e0410d520648
Nov 21 15:54:47 crc kubenswrapper[4967]: I1121 15:54:47.175545 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-bf2mc" event={"ID":"b1974654-371e-49f8-b8d3-701e31f82b54","Type":"ContainerStarted","Data":"e0c573949eaf9559589983b28f8a468281be5f602e90170995c432bde32dff2d"}
Nov 21 15:54:47 crc kubenswrapper[4967]: I1121 15:54:47.177862 4967 generic.go:334] "Generic (PLEG): container finished" podID="5d966c29-248f-49a3-b5c8-e88deb1aa0d9" containerID="842577b1cfc0a47d3fdb8501eeae12483fbad67daf8abbf64d8ae71dd04a257f" exitCode=0
Nov 21 15:54:47 crc kubenswrapper[4967]: I1121 15:54:47.177956 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-kn45m" event={"ID":"5d966c29-248f-49a3-b5c8-e88deb1aa0d9","Type":"ContainerDied","Data":"842577b1cfc0a47d3fdb8501eeae12483fbad67daf8abbf64d8ae71dd04a257f"}
Nov 21 15:54:47 crc kubenswrapper[4967]: I1121 15:54:47.178038 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-kn45m" event={"ID":"5d966c29-248f-49a3-b5c8-e88deb1aa0d9","Type":"ContainerStarted","Data":"2e3bdedb588c8d9784bb1596dfccdd1a7ffa011dc7acaff4e865306d6169317d"}
Nov 21 15:54:47 crc kubenswrapper[4967]: I1121 15:54:47.179890 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-xpqkr" event={"ID":"88694819-ea7a-48ee-89fb-c9df36ca33d9","Type":"ContainerStarted","Data":"48ec1d7020eb76484122ad785bf12fd7ea8b5cc23d39de7bf2c9d7a3ef1b58d0"}
Nov 21 15:54:47 crc kubenswrapper[4967]: I1121 15:54:47.181954 4967 generic.go:334] "Generic (PLEG): container finished" podID="6e949c27-e236-4411-b306-eb9e7d3385f4" containerID="30cc6efdc6c30bd7d5908feee24e8269ea60a7591593733bcf2bae84e41ad947" exitCode=0
Nov 21 15:54:47 crc kubenswrapper[4967]: I1121 15:54:47.182046 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-50c0-account-create-c5xnl" event={"ID":"6e949c27-e236-4411-b306-eb9e7d3385f4","Type":"ContainerDied","Data":"30cc6efdc6c30bd7d5908feee24e8269ea60a7591593733bcf2bae84e41ad947"}
Nov 21 15:54:47 crc kubenswrapper[4967]: I1121 15:54:47.182112 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-50c0-account-create-c5xnl" event={"ID":"6e949c27-e236-4411-b306-eb9e7d3385f4","Type":"ContainerStarted","Data":"8f05f6f4feca090f9d3be6aa8c0011841ae0fda526dec59ad8cbf1ae7630dd5d"}
Nov 21 15:54:47 crc kubenswrapper[4967]: I1121 15:54:47.183409 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-d1ef-account-create-bpt7z" event={"ID":"22c59946-4e7a-45d0-8cd3-b7de14d8e8f5","Type":"ContainerStarted","Data":"5f7df08c74c923559ac1f49498447eca8ad30207a28a2e90a1f7e0410d520648"}
Nov 21 15:54:47 crc kubenswrapper[4967]: I1121 15:54:47.185559 4967 generic.go:334] "Generic (PLEG): container finished" podID="3678c17b-6120-4063-b84d-7cb362b46b62" containerID="d24740e47a04bfc4aabd110bb8e8aaf83d65fe875b69fb7d6a58f4754f1f8953" exitCode=0
Nov 21 15:54:47 crc kubenswrapper[4967]: I1121 15:54:47.185640 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" event={"ID":"3678c17b-6120-4063-b84d-7cb362b46b62","Type":"ContainerDied","Data":"d24740e47a04bfc4aabd110bb8e8aaf83d65fe875b69fb7d6a58f4754f1f8953"}
Nov 21 15:54:47 crc kubenswrapper[4967]: I1121 15:54:47.185669 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" event={"ID":"3678c17b-6120-4063-b84d-7cb362b46b62","Type":"ContainerStarted","Data":"481cc457850f09de5f6733622295e785de44db3a183cba3a7624a5d78c5052a1"}
Nov 21 15:54:47 crc kubenswrapper[4967]: I1121 15:54:47.823517 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-7f56c8cd-xplcm" podUID="5a8a3edb-6fe7-4597-8e99-2ac664634b00" containerName="console" containerID="cri-o://bef07153af84578649ad213b08e3aff50b05f0d93e070ddd2aa06d5097230f2c" gracePeriod=15
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.199370 4967 generic.go:334] "Generic (PLEG): container finished" podID="88694819-ea7a-48ee-89fb-c9df36ca33d9" containerID="c556a57c9036f81a8c74981a72d6a4aeccd59b9db913adb2455b759cf8afc989" exitCode=0
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.199602 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-xpqkr" event={"ID":"88694819-ea7a-48ee-89fb-c9df36ca33d9","Type":"ContainerDied","Data":"c556a57c9036f81a8c74981a72d6a4aeccd59b9db913adb2455b759cf8afc989"}
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.203829 4967 generic.go:334] "Generic (PLEG): container finished" podID="22c59946-4e7a-45d0-8cd3-b7de14d8e8f5" containerID="09b473eb28b1f48ed986c45f476789a9586fef1ac7c6912423b32319148c0652" exitCode=0
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.204185 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-d1ef-account-create-bpt7z" event={"ID":"22c59946-4e7a-45d0-8cd3-b7de14d8e8f5","Type":"ContainerDied","Data":"09b473eb28b1f48ed986c45f476789a9586fef1ac7c6912423b32319148c0652"}
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.206350 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" event={"ID":"3678c17b-6120-4063-b84d-7cb362b46b62","Type":"ContainerStarted","Data":"d00e911b5f610723bdfe86e653cc8ee024c662cb4968b26abf6b9606993f8579"}
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.206466 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2"
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.208167 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-7f56c8cd-xplcm_5a8a3edb-6fe7-4597-8e99-2ac664634b00/console/0.log"
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.208221 4967 generic.go:334] "Generic (PLEG): container finished" podID="5a8a3edb-6fe7-4597-8e99-2ac664634b00" containerID="bef07153af84578649ad213b08e3aff50b05f0d93e070ddd2aa06d5097230f2c" exitCode=2
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.208260 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7f56c8cd-xplcm" event={"ID":"5a8a3edb-6fe7-4597-8e99-2ac664634b00","Type":"ContainerDied","Data":"bef07153af84578649ad213b08e3aff50b05f0d93e070ddd2aa06d5097230f2c"}
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.244215 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" podStartSLOduration=7.244196757 podStartE2EDuration="7.244196757s" podCreationTimestamp="2025-11-21 15:54:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:54:48.231867264 +0000 UTC m=+1176.490388272" watchObservedRunningTime="2025-11-21 15:54:48.244196757 +0000 UTC m=+1176.502717765"
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.431264 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-7f56c8cd-xplcm_5a8a3edb-6fe7-4597-8e99-2ac664634b00/console/0.log"
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.431683 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-7f56c8cd-xplcm"
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.500731 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5a8a3edb-6fe7-4597-8e99-2ac664634b00-console-serving-cert\") pod \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") "
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.500816 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-service-ca\") pod \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") "
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.500948 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-trusted-ca-bundle\") pod \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") "
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.501009 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-console-config\") pod \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") "
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.501142 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-oauth-serving-cert\") pod \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") "
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.501169 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gvdb4\" (UniqueName: \"kubernetes.io/projected/5a8a3edb-6fe7-4597-8e99-2ac664634b00-kube-api-access-gvdb4\") pod \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") "
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.501223 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5a8a3edb-6fe7-4597-8e99-2ac664634b00-console-oauth-config\") pod \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\" (UID: \"5a8a3edb-6fe7-4597-8e99-2ac664634b00\") "
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.501825 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "5a8a3edb-6fe7-4597-8e99-2ac664634b00" (UID: "5a8a3edb-6fe7-4597-8e99-2ac664634b00"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.502359 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "5a8a3edb-6fe7-4597-8e99-2ac664634b00" (UID: "5a8a3edb-6fe7-4597-8e99-2ac664634b00"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.502755 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-service-ca" (OuterVolumeSpecName: "service-ca") pod "5a8a3edb-6fe7-4597-8e99-2ac664634b00" (UID: "5a8a3edb-6fe7-4597-8e99-2ac664634b00"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.503045 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-console-config" (OuterVolumeSpecName: "console-config") pod "5a8a3edb-6fe7-4597-8e99-2ac664634b00" (UID: "5a8a3edb-6fe7-4597-8e99-2ac664634b00"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.605465 4967 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.605501 4967 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-console-config\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.605511 4967 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.605522 4967 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5a8a3edb-6fe7-4597-8e99-2ac664634b00-service-ca\") on node \"crc\" DevicePath \"\""
Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.611907 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a8a3edb-6fe7-4597-8e99-2ac664634b00-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "5a8a3edb-6fe7-4597-8e99-2ac664634b00" (UID: "5a8a3edb-6fe7-4597-8e99-2ac664634b00"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.612730 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a8a3edb-6fe7-4597-8e99-2ac664634b00-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "5a8a3edb-6fe7-4597-8e99-2ac664634b00" (UID: "5a8a3edb-6fe7-4597-8e99-2ac664634b00"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.615077 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a8a3edb-6fe7-4597-8e99-2ac664634b00-kube-api-access-gvdb4" (OuterVolumeSpecName: "kube-api-access-gvdb4") pod "5a8a3edb-6fe7-4597-8e99-2ac664634b00" (UID: "5a8a3edb-6fe7-4597-8e99-2ac664634b00"). InnerVolumeSpecName "kube-api-access-gvdb4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.706889 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gvdb4\" (UniqueName: \"kubernetes.io/projected/5a8a3edb-6fe7-4597-8e99-2ac664634b00-kube-api-access-gvdb4\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.706918 4967 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5a8a3edb-6fe7-4597-8e99-2ac664634b00-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.706927 4967 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5a8a3edb-6fe7-4597-8e99-2ac664634b00-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.786861 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-50c0-account-create-c5xnl" Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.793446 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-kn45m" Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.909360 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5d966c29-248f-49a3-b5c8-e88deb1aa0d9-operator-scripts\") pod \"5d966c29-248f-49a3-b5c8-e88deb1aa0d9\" (UID: \"5d966c29-248f-49a3-b5c8-e88deb1aa0d9\") " Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.909426 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t6rt7\" (UniqueName: \"kubernetes.io/projected/6e949c27-e236-4411-b306-eb9e7d3385f4-kube-api-access-t6rt7\") pod \"6e949c27-e236-4411-b306-eb9e7d3385f4\" (UID: \"6e949c27-e236-4411-b306-eb9e7d3385f4\") " Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.909584 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e949c27-e236-4411-b306-eb9e7d3385f4-operator-scripts\") pod \"6e949c27-e236-4411-b306-eb9e7d3385f4\" (UID: \"6e949c27-e236-4411-b306-eb9e7d3385f4\") " Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.909692 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k8zwd\" (UniqueName: \"kubernetes.io/projected/5d966c29-248f-49a3-b5c8-e88deb1aa0d9-kube-api-access-k8zwd\") pod \"5d966c29-248f-49a3-b5c8-e88deb1aa0d9\" (UID: \"5d966c29-248f-49a3-b5c8-e88deb1aa0d9\") " Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.910047 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d966c29-248f-49a3-b5c8-e88deb1aa0d9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5d966c29-248f-49a3-b5c8-e88deb1aa0d9" (UID: "5d966c29-248f-49a3-b5c8-e88deb1aa0d9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.910330 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e949c27-e236-4411-b306-eb9e7d3385f4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6e949c27-e236-4411-b306-eb9e7d3385f4" (UID: "6e949c27-e236-4411-b306-eb9e7d3385f4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.910412 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5d966c29-248f-49a3-b5c8-e88deb1aa0d9-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.916230 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d966c29-248f-49a3-b5c8-e88deb1aa0d9-kube-api-access-k8zwd" (OuterVolumeSpecName: "kube-api-access-k8zwd") pod "5d966c29-248f-49a3-b5c8-e88deb1aa0d9" (UID: "5d966c29-248f-49a3-b5c8-e88deb1aa0d9"). InnerVolumeSpecName "kube-api-access-k8zwd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:54:48 crc kubenswrapper[4967]: I1121 15:54:48.920259 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e949c27-e236-4411-b306-eb9e7d3385f4-kube-api-access-t6rt7" (OuterVolumeSpecName: "kube-api-access-t6rt7") pod "6e949c27-e236-4411-b306-eb9e7d3385f4" (UID: "6e949c27-e236-4411-b306-eb9e7d3385f4"). 
InnerVolumeSpecName "kube-api-access-t6rt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.012415 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t6rt7\" (UniqueName: \"kubernetes.io/projected/6e949c27-e236-4411-b306-eb9e7d3385f4-kube-api-access-t6rt7\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.012440 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e949c27-e236-4411-b306-eb9e7d3385f4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.012450 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k8zwd\" (UniqueName: \"kubernetes.io/projected/5d966c29-248f-49a3-b5c8-e88deb1aa0d9-kube-api-access-k8zwd\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.219994 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-50c0-account-create-c5xnl" event={"ID":"6e949c27-e236-4411-b306-eb9e7d3385f4","Type":"ContainerDied","Data":"8f05f6f4feca090f9d3be6aa8c0011841ae0fda526dec59ad8cbf1ae7630dd5d"} Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.220048 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8f05f6f4feca090f9d3be6aa8c0011841ae0fda526dec59ad8cbf1ae7630dd5d" Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.220004 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-50c0-account-create-c5xnl" Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.223533 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"d44bd64f-3c97-4cd8-be5e-2cabe45480a0","Type":"ContainerStarted","Data":"1686cfe04a85b0ebec1262d0d343e4110b3c2ae05cad2d59f05b8353373c9ac4"} Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.225481 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-kn45m" event={"ID":"5d966c29-248f-49a3-b5c8-e88deb1aa0d9","Type":"ContainerDied","Data":"2e3bdedb588c8d9784bb1596dfccdd1a7ffa011dc7acaff4e865306d6169317d"} Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.225511 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-kn45m" Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.225513 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e3bdedb588c8d9784bb1596dfccdd1a7ffa011dc7acaff4e865306d6169317d" Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.227717 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-7f56c8cd-xplcm_5a8a3edb-6fe7-4597-8e99-2ac664634b00/console/0.log" Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.227893 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-7f56c8cd-xplcm" Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.227913 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-7f56c8cd-xplcm" event={"ID":"5a8a3edb-6fe7-4597-8e99-2ac664634b00","Type":"ContainerDied","Data":"0d4114c86c121ccb89dc273650be76bf3640839d64ee12913c2cb17e31698e56"} Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.227962 4967 scope.go:117] "RemoveContainer" containerID="bef07153af84578649ad213b08e3aff50b05f0d93e070ddd2aa06d5097230f2c" Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.282355 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-7f56c8cd-xplcm"] Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.289596 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-7f56c8cd-xplcm"] Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.856204 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-xpqkr" Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.930891 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dntr\" (UniqueName: \"kubernetes.io/projected/88694819-ea7a-48ee-89fb-c9df36ca33d9-kube-api-access-9dntr\") pod \"88694819-ea7a-48ee-89fb-c9df36ca33d9\" (UID: \"88694819-ea7a-48ee-89fb-c9df36ca33d9\") " Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.931143 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88694819-ea7a-48ee-89fb-c9df36ca33d9-operator-scripts\") pod \"88694819-ea7a-48ee-89fb-c9df36ca33d9\" (UID: \"88694819-ea7a-48ee-89fb-c9df36ca33d9\") " Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.931605 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/88694819-ea7a-48ee-89fb-c9df36ca33d9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "88694819-ea7a-48ee-89fb-c9df36ca33d9" (UID: "88694819-ea7a-48ee-89fb-c9df36ca33d9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.931863 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88694819-ea7a-48ee-89fb-c9df36ca33d9-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:49 crc kubenswrapper[4967]: I1121 15:54:49.937231 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88694819-ea7a-48ee-89fb-c9df36ca33d9-kube-api-access-9dntr" (OuterVolumeSpecName: "kube-api-access-9dntr") pod "88694819-ea7a-48ee-89fb-c9df36ca33d9" (UID: "88694819-ea7a-48ee-89fb-c9df36ca33d9"). InnerVolumeSpecName "kube-api-access-9dntr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:54:50 crc kubenswrapper[4967]: I1121 15:54:50.033850 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dntr\" (UniqueName: \"kubernetes.io/projected/88694819-ea7a-48ee-89fb-c9df36ca33d9-kube-api-access-9dntr\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:50 crc kubenswrapper[4967]: I1121 15:54:50.239264 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0" Nov 21 15:54:50 crc kubenswrapper[4967]: E1121 15:54:50.239478 4967 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 21 15:54:50 crc kubenswrapper[4967]: E1121 15:54:50.239504 4967 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 21 15:54:50 crc kubenswrapper[4967]: I1121 15:54:50.239519 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-xpqkr" Nov 21 15:54:50 crc kubenswrapper[4967]: E1121 15:54:50.239558 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift podName:4bf5cb6a-c8f8-43c3-b546-282bfd3244e2 nodeName:}" failed. No retries permitted until 2025-11-21 15:54:58.239536509 +0000 UTC m=+1186.498057517 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift") pod "swift-storage-0" (UID: "4bf5cb6a-c8f8-43c3-b546-282bfd3244e2") : configmap "swift-ring-files" not found Nov 21 15:54:50 crc kubenswrapper[4967]: I1121 15:54:50.239575 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-xpqkr" event={"ID":"88694819-ea7a-48ee-89fb-c9df36ca33d9","Type":"ContainerDied","Data":"48ec1d7020eb76484122ad785bf12fd7ea8b5cc23d39de7bf2c9d7a3ef1b58d0"} Nov 21 15:54:50 crc kubenswrapper[4967]: I1121 15:54:50.239612 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="48ec1d7020eb76484122ad785bf12fd7ea8b5cc23d39de7bf2c9d7a3ef1b58d0" Nov 21 15:54:50 crc kubenswrapper[4967]: I1121 15:54:50.242916 4967 generic.go:334] "Generic (PLEG): container finished" podID="1a455b5d-516b-4e43-a717-f7aa6e326ee8" containerID="a750152ee1f045650b939c339bc2d5965490eeb8a6e4045612999da96dba7fad" exitCode=0 Nov 21 15:54:50 crc kubenswrapper[4967]: I1121 15:54:50.242954 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"1a455b5d-516b-4e43-a717-f7aa6e326ee8","Type":"ContainerDied","Data":"a750152ee1f045650b939c339bc2d5965490eeb8a6e4045612999da96dba7fad"} Nov 21 15:54:50 crc kubenswrapper[4967]: I1121 15:54:50.548789 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a8a3edb-6fe7-4597-8e99-2ac664634b00" path="/var/lib/kubelet/pods/5a8a3edb-6fe7-4597-8e99-2ac664634b00/volumes" Nov 21 15:54:50 crc kubenswrapper[4967]: I1121 15:54:50.977504 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8554648995-b5ck8" podUID="151dc631-19a9-439f-89db-c3da47602908" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.146:5353: i/o timeout" Nov 21 15:54:51 crc 
kubenswrapper[4967]: I1121 15:54:51.032939 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.184998 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-d1ef-account-create-bpt7z" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.262283 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-vkb4s"] Nov 21 15:54:51 crc kubenswrapper[4967]: E1121 15:54:51.262783 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d966c29-248f-49a3-b5c8-e88deb1aa0d9" containerName="mariadb-database-create" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.262799 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d966c29-248f-49a3-b5c8-e88deb1aa0d9" containerName="mariadb-database-create" Nov 21 15:54:51 crc kubenswrapper[4967]: E1121 15:54:51.262812 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a8a3edb-6fe7-4597-8e99-2ac664634b00" containerName="console" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.262820 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a8a3edb-6fe7-4597-8e99-2ac664634b00" containerName="console" Nov 21 15:54:51 crc kubenswrapper[4967]: E1121 15:54:51.262838 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="151dc631-19a9-439f-89db-c3da47602908" containerName="dnsmasq-dns" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.262845 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="151dc631-19a9-439f-89db-c3da47602908" containerName="dnsmasq-dns" Nov 21 15:54:51 crc kubenswrapper[4967]: E1121 15:54:51.262860 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="704454f3-1078-44b7-b41f-3da332e4015f" containerName="mariadb-database-create" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.262867 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="704454f3-1078-44b7-b41f-3da332e4015f" containerName="mariadb-database-create" Nov 21 15:54:51 crc kubenswrapper[4967]: E1121 15:54:51.262893 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88694819-ea7a-48ee-89fb-c9df36ca33d9" containerName="mariadb-database-create" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.262901 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="88694819-ea7a-48ee-89fb-c9df36ca33d9" containerName="mariadb-database-create" Nov 21 15:54:51 crc kubenswrapper[4967]: E1121 15:54:51.262911 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e57707db-bae6-4223-968e-52d7ba80f7f2" containerName="mariadb-account-create" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.262918 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="e57707db-bae6-4223-968e-52d7ba80f7f2" containerName="mariadb-account-create" Nov 21 15:54:51 crc kubenswrapper[4967]: E1121 15:54:51.262935 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fb72003-35bf-4473-be52-303e57f5351d" containerName="mariadb-database-create" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.262943 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fb72003-35bf-4473-be52-303e57f5351d" containerName="mariadb-database-create" Nov 21 15:54:51 crc kubenswrapper[4967]: E1121 15:54:51.262963 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="151dc631-19a9-439f-89db-c3da47602908" containerName="init" Nov 21 
15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.262970 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="151dc631-19a9-439f-89db-c3da47602908" containerName="init" Nov 21 15:54:51 crc kubenswrapper[4967]: E1121 15:54:51.262984 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22c59946-4e7a-45d0-8cd3-b7de14d8e8f5" containerName="mariadb-account-create" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.262991 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="22c59946-4e7a-45d0-8cd3-b7de14d8e8f5" containerName="mariadb-account-create" Nov 21 15:54:51 crc kubenswrapper[4967]: E1121 15:54:51.263006 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a44abc5-c95d-4bb5-ae69-1b770ee9fe56" containerName="mariadb-account-create" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.263014 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a44abc5-c95d-4bb5-ae69-1b770ee9fe56" containerName="mariadb-account-create" Nov 21 15:54:51 crc kubenswrapper[4967]: E1121 15:54:51.263035 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e949c27-e236-4411-b306-eb9e7d3385f4" containerName="mariadb-account-create" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.263042 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e949c27-e236-4411-b306-eb9e7d3385f4" containerName="mariadb-account-create" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.263960 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e949c27-e236-4411-b306-eb9e7d3385f4" containerName="mariadb-account-create" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.263979 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="e57707db-bae6-4223-968e-52d7ba80f7f2" containerName="mariadb-account-create" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.263994 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fb72003-35bf-4473-be52-303e57f5351d" containerName="mariadb-database-create" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.264009 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="704454f3-1078-44b7-b41f-3da332e4015f" containerName="mariadb-database-create" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.264023 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a44abc5-c95d-4bb5-ae69-1b770ee9fe56" containerName="mariadb-account-create" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.264037 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a8a3edb-6fe7-4597-8e99-2ac664634b00" containerName="console" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.265090 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="22c59946-4e7a-45d0-8cd3-b7de14d8e8f5" containerName="mariadb-account-create" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.265128 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="151dc631-19a9-439f-89db-c3da47602908" containerName="dnsmasq-dns" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.265146 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="88694819-ea7a-48ee-89fb-c9df36ca33d9" containerName="mariadb-database-create" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.265158 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d966c29-248f-49a3-b5c8-e88deb1aa0d9" containerName="mariadb-database-create" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.266190 4967 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-vkb4s" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.269214 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-d1ef-account-create-bpt7z" event={"ID":"22c59946-4e7a-45d0-8cd3-b7de14d8e8f5","Type":"ContainerDied","Data":"5f7df08c74c923559ac1f49498447eca8ad30207a28a2e90a1f7e0410d520648"} Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.269475 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f7df08c74c923559ac1f49498447eca8ad30207a28a2e90a1f7e0410d520648" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.269538 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-d1ef-account-create-bpt7z" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.272184 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22c59946-4e7a-45d0-8cd3-b7de14d8e8f5-operator-scripts\") pod \"22c59946-4e7a-45d0-8cd3-b7de14d8e8f5\" (UID: \"22c59946-4e7a-45d0-8cd3-b7de14d8e8f5\") " Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.272241 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-74mtj\" (UniqueName: \"kubernetes.io/projected/22c59946-4e7a-45d0-8cd3-b7de14d8e8f5-kube-api-access-74mtj\") pod \"22c59946-4e7a-45d0-8cd3-b7de14d8e8f5\" (UID: \"22c59946-4e7a-45d0-8cd3-b7de14d8e8f5\") " Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.272671 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c59946-4e7a-45d0-8cd3-b7de14d8e8f5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "22c59946-4e7a-45d0-8cd3-b7de14d8e8f5" (UID: "22c59946-4e7a-45d0-8cd3-b7de14d8e8f5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.273027 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22c59946-4e7a-45d0-8cd3-b7de14d8e8f5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.274761 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-vkb4s"] Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.288200 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c59946-4e7a-45d0-8cd3-b7de14d8e8f5-kube-api-access-74mtj" (OuterVolumeSpecName: "kube-api-access-74mtj") pod "22c59946-4e7a-45d0-8cd3-b7de14d8e8f5" (UID: "22c59946-4e7a-45d0-8cd3-b7de14d8e8f5"). InnerVolumeSpecName "kube-api-access-74mtj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.375023 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c78ece6b-57df-4cf9-add7-6c8ca602d7a8-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-vkb4s\" (UID: \"c78ece6b-57df-4cf9-add7-6c8ca602d7a8\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-vkb4s" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.375415 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgrzm\" (UniqueName: \"kubernetes.io/projected/c78ece6b-57df-4cf9-add7-6c8ca602d7a8-kube-api-access-vgrzm\") pod \"mysqld-exporter-openstack-cell1-db-create-vkb4s\" (UID: \"c78ece6b-57df-4cf9-add7-6c8ca602d7a8\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-vkb4s" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.375701 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-74mtj\" (UniqueName: \"kubernetes.io/projected/22c59946-4e7a-45d0-8cd3-b7de14d8e8f5-kube-api-access-74mtj\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.462086 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-fa6c-account-create-x8pf7"] Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.463468 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-fa6c-account-create-x8pf7" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.466701 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-cell1-db-secret" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.474582 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-fa6c-account-create-x8pf7"] Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.477765 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c78ece6b-57df-4cf9-add7-6c8ca602d7a8-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-vkb4s\" (UID: \"c78ece6b-57df-4cf9-add7-6c8ca602d7a8\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-vkb4s" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.477839 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgrzm\" (UniqueName: \"kubernetes.io/projected/c78ece6b-57df-4cf9-add7-6c8ca602d7a8-kube-api-access-vgrzm\") pod \"mysqld-exporter-openstack-cell1-db-create-vkb4s\" (UID: \"c78ece6b-57df-4cf9-add7-6c8ca602d7a8\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-vkb4s" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.482588 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c78ece6b-57df-4cf9-add7-6c8ca602d7a8-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-vkb4s\" (UID: \"c78ece6b-57df-4cf9-add7-6c8ca602d7a8\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-vkb4s" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.503232 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgrzm\" (UniqueName: \"kubernetes.io/projected/c78ece6b-57df-4cf9-add7-6c8ca602d7a8-kube-api-access-vgrzm\") pod 
\"mysqld-exporter-openstack-cell1-db-create-vkb4s\" (UID: \"c78ece6b-57df-4cf9-add7-6c8ca602d7a8\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-vkb4s" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.579922 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7775b65a-c6fb-4eda-9006-182b889c4a0b-operator-scripts\") pod \"mysqld-exporter-fa6c-account-create-x8pf7\" (UID: \"7775b65a-c6fb-4eda-9006-182b889c4a0b\") " pod="openstack/mysqld-exporter-fa6c-account-create-x8pf7" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.579991 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6fcd\" (UniqueName: \"kubernetes.io/projected/7775b65a-c6fb-4eda-9006-182b889c4a0b-kube-api-access-d6fcd\") pod \"mysqld-exporter-fa6c-account-create-x8pf7\" (UID: \"7775b65a-c6fb-4eda-9006-182b889c4a0b\") " pod="openstack/mysqld-exporter-fa6c-account-create-x8pf7" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.605864 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-vkb4s" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.683970 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7775b65a-c6fb-4eda-9006-182b889c4a0b-operator-scripts\") pod \"mysqld-exporter-fa6c-account-create-x8pf7\" (UID: \"7775b65a-c6fb-4eda-9006-182b889c4a0b\") " pod="openstack/mysqld-exporter-fa6c-account-create-x8pf7" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.684943 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6fcd\" (UniqueName: \"kubernetes.io/projected/7775b65a-c6fb-4eda-9006-182b889c4a0b-kube-api-access-d6fcd\") pod \"mysqld-exporter-fa6c-account-create-x8pf7\" (UID: \"7775b65a-c6fb-4eda-9006-182b889c4a0b\") " pod="openstack/mysqld-exporter-fa6c-account-create-x8pf7" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.685524 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7775b65a-c6fb-4eda-9006-182b889c4a0b-operator-scripts\") pod \"mysqld-exporter-fa6c-account-create-x8pf7\" (UID: \"7775b65a-c6fb-4eda-9006-182b889c4a0b\") " pod="openstack/mysqld-exporter-fa6c-account-create-x8pf7" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.703958 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6fcd\" (UniqueName: \"kubernetes.io/projected/7775b65a-c6fb-4eda-9006-182b889c4a0b-kube-api-access-d6fcd\") pod \"mysqld-exporter-fa6c-account-create-x8pf7\" (UID: \"7775b65a-c6fb-4eda-9006-182b889c4a0b\") " pod="openstack/mysqld-exporter-fa6c-account-create-x8pf7" Nov 21 15:54:51 crc kubenswrapper[4967]: I1121 15:54:51.785265 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-fa6c-account-create-x8pf7" Nov 21 15:54:52 crc kubenswrapper[4967]: I1121 15:54:52.113073 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-vkb4s"] Nov 21 15:54:52 crc kubenswrapper[4967]: I1121 15:54:52.285699 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"1a455b5d-516b-4e43-a717-f7aa6e326ee8","Type":"ContainerStarted","Data":"8488f2e8aab08ef2629317fbaa7f522860a9202e49506daeea29139b792ea618"} Nov 21 15:54:52 crc kubenswrapper[4967]: I1121 15:54:52.287289 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 21 15:54:52 crc kubenswrapper[4967]: I1121 15:54:52.290897 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-bf2mc" event={"ID":"b1974654-371e-49f8-b8d3-701e31f82b54","Type":"ContainerStarted","Data":"34fc424c307f993395b5a5d9da7f0d627e0fd3f5c18319490a88b396e64995a3"} Nov 21 15:54:52 crc kubenswrapper[4967]: I1121 15:54:52.292407 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-vkb4s" event={"ID":"c78ece6b-57df-4cf9-add7-6c8ca602d7a8","Type":"ContainerStarted","Data":"2360446bb994b04c8035157f02cc09dac946a34b1d743acb2162dc070689ec16"} Nov 21 15:54:52 crc kubenswrapper[4967]: I1121 15:54:52.311338 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.350972282 podStartE2EDuration="59.311299152s" podCreationTimestamp="2025-11-21 15:53:53 +0000 UTC" firstStartedPulling="2025-11-21 15:53:55.620160567 +0000 UTC m=+1123.878681575" lastFinishedPulling="2025-11-21 15:54:16.580487437 +0000 UTC m=+1144.839008445" observedRunningTime="2025-11-21 15:54:52.310514499 +0000 UTC m=+1180.569035507" watchObservedRunningTime="2025-11-21 15:54:52.311299152 +0000 UTC m=+1180.569820160" Nov 21 15:54:52 crc kubenswrapper[4967]: I1121 15:54:52.330707 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-bf2mc" podStartSLOduration=6.008520013 podStartE2EDuration="10.330687857s" podCreationTimestamp="2025-11-21 15:54:42 +0000 UTC" firstStartedPulling="2025-11-21 15:54:46.717891949 +0000 UTC m=+1174.976412968" lastFinishedPulling="2025-11-21 15:54:51.040059804 +0000 UTC m=+1179.298580812" observedRunningTime="2025-11-21 15:54:52.329667138 +0000 UTC m=+1180.588188146" watchObservedRunningTime="2025-11-21 15:54:52.330687857 +0000 UTC m=+1180.589208865" Nov 21 15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.249525 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-fa6c-account-create-x8pf7"] Nov 21 15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.303792 4967 generic.go:334] "Generic (PLEG): container finished" podID="c78ece6b-57df-4cf9-add7-6c8ca602d7a8" containerID="6a6e1e3b41c90eec8fb9782c755a74d169a44531655e0544b2ae43dd80c5edfb" exitCode=0 Nov 21 15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.303892 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-vkb4s" event={"ID":"c78ece6b-57df-4cf9-add7-6c8ca602d7a8","Type":"ContainerDied","Data":"6a6e1e3b41c90eec8fb9782c755a74d169a44531655e0544b2ae43dd80c5edfb"} Nov 21 15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.776159 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-9fbft"] Nov 21 
15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.778337 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-9fbft" Nov 21 15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.781022 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 21 15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.781453 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-2sd9k" Nov 21 15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.800194 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-9fbft"] Nov 21 15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.833704 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bs4df\" (UniqueName: \"kubernetes.io/projected/fc07e264-27b3-4f82-b96e-04ef32de4c2c-kube-api-access-bs4df\") pod \"glance-db-sync-9fbft\" (UID: \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\") " pod="openstack/glance-db-sync-9fbft" Nov 21 15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.833769 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc07e264-27b3-4f82-b96e-04ef32de4c2c-config-data\") pod \"glance-db-sync-9fbft\" (UID: \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\") " pod="openstack/glance-db-sync-9fbft" Nov 21 15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.833797 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fc07e264-27b3-4f82-b96e-04ef32de4c2c-db-sync-config-data\") pod \"glance-db-sync-9fbft\" (UID: \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\") " pod="openstack/glance-db-sync-9fbft" Nov 21 15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.833880 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc07e264-27b3-4f82-b96e-04ef32de4c2c-combined-ca-bundle\") pod \"glance-db-sync-9fbft\" (UID: \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\") " pod="openstack/glance-db-sync-9fbft" Nov 21 15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.935849 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bs4df\" (UniqueName: \"kubernetes.io/projected/fc07e264-27b3-4f82-b96e-04ef32de4c2c-kube-api-access-bs4df\") pod \"glance-db-sync-9fbft\" (UID: \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\") " pod="openstack/glance-db-sync-9fbft" Nov 21 15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.935909 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc07e264-27b3-4f82-b96e-04ef32de4c2c-config-data\") pod \"glance-db-sync-9fbft\" (UID: \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\") " pod="openstack/glance-db-sync-9fbft" Nov 21 15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.935937 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fc07e264-27b3-4f82-b96e-04ef32de4c2c-db-sync-config-data\") pod \"glance-db-sync-9fbft\" (UID: \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\") " pod="openstack/glance-db-sync-9fbft" Nov 21 15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.936032 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/fc07e264-27b3-4f82-b96e-04ef32de4c2c-combined-ca-bundle\") pod \"glance-db-sync-9fbft\" (UID: \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\") " pod="openstack/glance-db-sync-9fbft" Nov 21 15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.942143 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fc07e264-27b3-4f82-b96e-04ef32de4c2c-db-sync-config-data\") pod \"glance-db-sync-9fbft\" (UID: \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\") " pod="openstack/glance-db-sync-9fbft" Nov 21 15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.942855 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc07e264-27b3-4f82-b96e-04ef32de4c2c-config-data\") pod \"glance-db-sync-9fbft\" (UID: \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\") " pod="openstack/glance-db-sync-9fbft" Nov 21 15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.943884 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc07e264-27b3-4f82-b96e-04ef32de4c2c-combined-ca-bundle\") pod \"glance-db-sync-9fbft\" (UID: \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\") " pod="openstack/glance-db-sync-9fbft" Nov 21 15:54:53 crc kubenswrapper[4967]: I1121 15:54:53.955380 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bs4df\" (UniqueName: \"kubernetes.io/projected/fc07e264-27b3-4f82-b96e-04ef32de4c2c-kube-api-access-bs4df\") pod \"glance-db-sync-9fbft\" (UID: \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\") " pod="openstack/glance-db-sync-9fbft" Nov 21 15:54:54 crc kubenswrapper[4967]: I1121 15:54:54.128749 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-9fbft" Nov 21 15:54:54 crc kubenswrapper[4967]: I1121 15:54:54.323757 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-fa6c-account-create-x8pf7" event={"ID":"7775b65a-c6fb-4eda-9006-182b889c4a0b","Type":"ContainerStarted","Data":"cec0cd7e9016c4fcc4212cc41c4e9cdfcbad4f5c2b211832297965a7dfff8952"} Nov 21 15:54:54 crc kubenswrapper[4967]: I1121 15:54:54.324409 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-fa6c-account-create-x8pf7" event={"ID":"7775b65a-c6fb-4eda-9006-182b889c4a0b","Type":"ContainerStarted","Data":"f9a8be2e2aa64faa3eba22b6472edd25cbf9fa6852bfcc69e9289e56ad49d005"} Nov 21 15:54:54 crc kubenswrapper[4967]: I1121 15:54:54.335199 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"d44bd64f-3c97-4cd8-be5e-2cabe45480a0","Type":"ContainerStarted","Data":"eb63434d7c67ec3122db53c7949f3b3b6264731cf0d81f7c31da6df51ce6ffdb"} Nov 21 15:54:54 crc kubenswrapper[4967]: I1121 15:54:54.353887 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-fa6c-account-create-x8pf7" podStartSLOduration=3.3538689870000002 podStartE2EDuration="3.353868987s" podCreationTimestamp="2025-11-21 15:54:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:54:54.34527107 +0000 UTC m=+1182.603792088" watchObservedRunningTime="2025-11-21 15:54:54.353868987 +0000 UTC m=+1182.612389995" Nov 21 15:54:54 crc kubenswrapper[4967]: I1121 15:54:54.382739 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=16.907523871 podStartE2EDuration="54.382719824s" podCreationTimestamp="2025-11-21 15:54:00 +0000 UTC" firstStartedPulling="2025-11-21 15:54:16.523150584 +0000 UTC m=+1144.781671592" lastFinishedPulling="2025-11-21 15:54:53.998346537 +0000 UTC m=+1182.256867545" observedRunningTime="2025-11-21 15:54:54.377257507 +0000 UTC m=+1182.635778525" watchObservedRunningTime="2025-11-21 15:54:54.382719824 +0000 UTC m=+1182.641240832" Nov 21 15:54:54 crc kubenswrapper[4967]: I1121 15:54:54.716670 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-9fbft"] Nov 21 15:54:54 crc kubenswrapper[4967]: W1121 15:54:54.726910 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfc07e264_27b3_4f82_b96e_04ef32de4c2c.slice/crio-853579d280b37c89d9803a752955cf84a2c619a6ee632f40adefbd5e471ac9ad WatchSource:0}: Error finding container 853579d280b37c89d9803a752955cf84a2c619a6ee632f40adefbd5e471ac9ad: Status 404 returned error can't find the container with id 853579d280b37c89d9803a752955cf84a2c619a6ee632f40adefbd5e471ac9ad Nov 21 15:54:54 crc kubenswrapper[4967]: I1121 15:54:54.747341 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-vkb4s" Nov 21 15:54:54 crc kubenswrapper[4967]: I1121 15:54:54.859435 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c78ece6b-57df-4cf9-add7-6c8ca602d7a8-operator-scripts\") pod \"c78ece6b-57df-4cf9-add7-6c8ca602d7a8\" (UID: \"c78ece6b-57df-4cf9-add7-6c8ca602d7a8\") " Nov 21 15:54:54 crc kubenswrapper[4967]: I1121 15:54:54.859572 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vgrzm\" (UniqueName: \"kubernetes.io/projected/c78ece6b-57df-4cf9-add7-6c8ca602d7a8-kube-api-access-vgrzm\") pod \"c78ece6b-57df-4cf9-add7-6c8ca602d7a8\" (UID: \"c78ece6b-57df-4cf9-add7-6c8ca602d7a8\") " Nov 21 15:54:54 crc kubenswrapper[4967]: I1121 15:54:54.860057 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c78ece6b-57df-4cf9-add7-6c8ca602d7a8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c78ece6b-57df-4cf9-add7-6c8ca602d7a8" (UID: "c78ece6b-57df-4cf9-add7-6c8ca602d7a8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:54:54 crc kubenswrapper[4967]: I1121 15:54:54.860677 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c78ece6b-57df-4cf9-add7-6c8ca602d7a8-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:54 crc kubenswrapper[4967]: I1121 15:54:54.865501 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c78ece6b-57df-4cf9-add7-6c8ca602d7a8-kube-api-access-vgrzm" (OuterVolumeSpecName: "kube-api-access-vgrzm") pod "c78ece6b-57df-4cf9-add7-6c8ca602d7a8" (UID: "c78ece6b-57df-4cf9-add7-6c8ca602d7a8"). InnerVolumeSpecName "kube-api-access-vgrzm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:54:54 crc kubenswrapper[4967]: I1121 15:54:54.962643 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vgrzm\" (UniqueName: \"kubernetes.io/projected/c78ece6b-57df-4cf9-add7-6c8ca602d7a8-kube-api-access-vgrzm\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:55 crc kubenswrapper[4967]: I1121 15:54:55.343103 4967 generic.go:334] "Generic (PLEG): container finished" podID="7775b65a-c6fb-4eda-9006-182b889c4a0b" containerID="cec0cd7e9016c4fcc4212cc41c4e9cdfcbad4f5c2b211832297965a7dfff8952" exitCode=0 Nov 21 15:54:55 crc kubenswrapper[4967]: I1121 15:54:55.343174 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-fa6c-account-create-x8pf7" event={"ID":"7775b65a-c6fb-4eda-9006-182b889c4a0b","Type":"ContainerDied","Data":"cec0cd7e9016c4fcc4212cc41c4e9cdfcbad4f5c2b211832297965a7dfff8952"} Nov 21 15:54:55 crc kubenswrapper[4967]: I1121 15:54:55.359738 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-vkb4s" Nov 21 15:54:55 crc kubenswrapper[4967]: I1121 15:54:55.359770 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-vkb4s" event={"ID":"c78ece6b-57df-4cf9-add7-6c8ca602d7a8","Type":"ContainerDied","Data":"2360446bb994b04c8035157f02cc09dac946a34b1d743acb2162dc070689ec16"} Nov 21 15:54:55 crc kubenswrapper[4967]: I1121 15:54:55.359813 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2360446bb994b04c8035157f02cc09dac946a34b1d743acb2162dc070689ec16" Nov 21 15:54:55 crc kubenswrapper[4967]: I1121 15:54:55.361105 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-9fbft" event={"ID":"fc07e264-27b3-4f82-b96e-04ef32de4c2c","Type":"ContainerStarted","Data":"853579d280b37c89d9803a752955cf84a2c619a6ee632f40adefbd5e471ac9ad"} Nov 21 15:54:56 crc kubenswrapper[4967]: I1121 15:54:56.587508 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" Nov 21 15:54:56 crc kubenswrapper[4967]: I1121 15:54:56.662351 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-fdphh"] Nov 21 15:54:56 crc kubenswrapper[4967]: I1121 15:54:56.662578 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" podUID="95ecc930-ddf3-4ced-a9e8-ade44ada5666" containerName="dnsmasq-dns" containerID="cri-o://3fa60fa7efe1ca9064213b74f17e7bb192b2769b9f9824137c1a9a217fad5054" gracePeriod=10 Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.023830 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-fa6c-account-create-x8pf7" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.114550 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7775b65a-c6fb-4eda-9006-182b889c4a0b-operator-scripts\") pod \"7775b65a-c6fb-4eda-9006-182b889c4a0b\" (UID: \"7775b65a-c6fb-4eda-9006-182b889c4a0b\") " Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.114660 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6fcd\" (UniqueName: \"kubernetes.io/projected/7775b65a-c6fb-4eda-9006-182b889c4a0b-kube-api-access-d6fcd\") pod \"7775b65a-c6fb-4eda-9006-182b889c4a0b\" (UID: \"7775b65a-c6fb-4eda-9006-182b889c4a0b\") " Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.115014 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7775b65a-c6fb-4eda-9006-182b889c4a0b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7775b65a-c6fb-4eda-9006-182b889c4a0b" (UID: "7775b65a-c6fb-4eda-9006-182b889c4a0b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.130507 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7775b65a-c6fb-4eda-9006-182b889c4a0b-kube-api-access-d6fcd" (OuterVolumeSpecName: "kube-api-access-d6fcd") pod "7775b65a-c6fb-4eda-9006-182b889c4a0b" (UID: "7775b65a-c6fb-4eda-9006-182b889c4a0b"). InnerVolumeSpecName "kube-api-access-d6fcd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.217179 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7775b65a-c6fb-4eda-9006-182b889c4a0b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.217215 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6fcd\" (UniqueName: \"kubernetes.io/projected/7775b65a-c6fb-4eda-9006-182b889c4a0b-kube-api-access-d6fcd\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.284006 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.292040 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.383822 4967 generic.go:334] "Generic (PLEG): container finished" podID="95ecc930-ddf3-4ced-a9e8-ade44ada5666" containerID="3fa60fa7efe1ca9064213b74f17e7bb192b2769b9f9824137c1a9a217fad5054" exitCode=0 Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.383910 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" event={"ID":"95ecc930-ddf3-4ced-a9e8-ade44ada5666","Type":"ContainerDied","Data":"3fa60fa7efe1ca9064213b74f17e7bb192b2769b9f9824137c1a9a217fad5054"} Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.383929 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.383984 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-fdphh" event={"ID":"95ecc930-ddf3-4ced-a9e8-ade44ada5666","Type":"ContainerDied","Data":"363e36075dd5f107a287414417fe17a2425392f49eb14ab339ae18450a510852"} Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.384013 4967 scope.go:117] "RemoveContainer" containerID="3fa60fa7efe1ca9064213b74f17e7bb192b2769b9f9824137c1a9a217fad5054" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.387921 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-fa6c-account-create-x8pf7" event={"ID":"7775b65a-c6fb-4eda-9006-182b889c4a0b","Type":"ContainerDied","Data":"f9a8be2e2aa64faa3eba22b6472edd25cbf9fa6852bfcc69e9289e56ad49d005"} Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.387962 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f9a8be2e2aa64faa3eba22b6472edd25cbf9fa6852bfcc69e9289e56ad49d005" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.388020 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-fa6c-account-create-x8pf7" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.412146 4967 scope.go:117] "RemoveContainer" containerID="7ab9162cc460102b7080d9b9cd7d9a088b4e65acfbb7b31e014d83b8e608dd21" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.420697 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z784j\" (UniqueName: \"kubernetes.io/projected/95ecc930-ddf3-4ced-a9e8-ade44ada5666-kube-api-access-z784j\") pod \"95ecc930-ddf3-4ced-a9e8-ade44ada5666\" (UID: \"95ecc930-ddf3-4ced-a9e8-ade44ada5666\") " Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.420931 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/95ecc930-ddf3-4ced-a9e8-ade44ada5666-dns-svc\") pod \"95ecc930-ddf3-4ced-a9e8-ade44ada5666\" (UID: \"95ecc930-ddf3-4ced-a9e8-ade44ada5666\") " Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.421073 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95ecc930-ddf3-4ced-a9e8-ade44ada5666-config\") pod \"95ecc930-ddf3-4ced-a9e8-ade44ada5666\" (UID: \"95ecc930-ddf3-4ced-a9e8-ade44ada5666\") " Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.426217 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95ecc930-ddf3-4ced-a9e8-ade44ada5666-kube-api-access-z784j" (OuterVolumeSpecName: "kube-api-access-z784j") pod "95ecc930-ddf3-4ced-a9e8-ade44ada5666" (UID: "95ecc930-ddf3-4ced-a9e8-ade44ada5666"). InnerVolumeSpecName "kube-api-access-z784j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.468592 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/95ecc930-ddf3-4ced-a9e8-ade44ada5666-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "95ecc930-ddf3-4ced-a9e8-ade44ada5666" (UID: "95ecc930-ddf3-4ced-a9e8-ade44ada5666"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.470335 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/95ecc930-ddf3-4ced-a9e8-ade44ada5666-config" (OuterVolumeSpecName: "config") pod "95ecc930-ddf3-4ced-a9e8-ade44ada5666" (UID: "95ecc930-ddf3-4ced-a9e8-ade44ada5666"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.471333 4967 scope.go:117] "RemoveContainer" containerID="3fa60fa7efe1ca9064213b74f17e7bb192b2769b9f9824137c1a9a217fad5054" Nov 21 15:54:57 crc kubenswrapper[4967]: E1121 15:54:57.471850 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3fa60fa7efe1ca9064213b74f17e7bb192b2769b9f9824137c1a9a217fad5054\": container with ID starting with 3fa60fa7efe1ca9064213b74f17e7bb192b2769b9f9824137c1a9a217fad5054 not found: ID does not exist" containerID="3fa60fa7efe1ca9064213b74f17e7bb192b2769b9f9824137c1a9a217fad5054" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.471890 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3fa60fa7efe1ca9064213b74f17e7bb192b2769b9f9824137c1a9a217fad5054"} err="failed to get container status \"3fa60fa7efe1ca9064213b74f17e7bb192b2769b9f9824137c1a9a217fad5054\": rpc error: code = NotFound desc = could not find container \"3fa60fa7efe1ca9064213b74f17e7bb192b2769b9f9824137c1a9a217fad5054\": container with ID starting with 3fa60fa7efe1ca9064213b74f17e7bb192b2769b9f9824137c1a9a217fad5054 not found: ID does not exist" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.471917 4967 scope.go:117] "RemoveContainer" containerID="7ab9162cc460102b7080d9b9cd7d9a088b4e65acfbb7b31e014d83b8e608dd21" Nov 21 15:54:57 crc kubenswrapper[4967]: E1121 15:54:57.472219 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ab9162cc460102b7080d9b9cd7d9a088b4e65acfbb7b31e014d83b8e608dd21\": container with ID starting with 7ab9162cc460102b7080d9b9cd7d9a088b4e65acfbb7b31e014d83b8e608dd21 not found: ID does not exist" containerID="7ab9162cc460102b7080d9b9cd7d9a088b4e65acfbb7b31e014d83b8e608dd21" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.472245 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ab9162cc460102b7080d9b9cd7d9a088b4e65acfbb7b31e014d83b8e608dd21"} err="failed to get container status \"7ab9162cc460102b7080d9b9cd7d9a088b4e65acfbb7b31e014d83b8e608dd21\": rpc error: code = NotFound desc = could not find container \"7ab9162cc460102b7080d9b9cd7d9a088b4e65acfbb7b31e014d83b8e608dd21\": container with ID starting with 7ab9162cc460102b7080d9b9cd7d9a088b4e65acfbb7b31e014d83b8e608dd21 not found: ID does not exist" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.523659 4967 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/95ecc930-ddf3-4ced-a9e8-ade44ada5666-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.523703 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/95ecc930-ddf3-4ced-a9e8-ade44ada5666-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.523719 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z784j\" (UniqueName: \"kubernetes.io/projected/95ecc930-ddf3-4ced-a9e8-ade44ada5666-kube-api-access-z784j\") on node \"crc\" DevicePath \"\"" Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.738527 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-fdphh"] Nov 21 15:54:57 crc kubenswrapper[4967]: I1121 15:54:57.745772 4967 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-fdphh"] Nov 21 15:54:58 crc kubenswrapper[4967]: I1121 15:54:58.248570 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0" Nov 21 15:54:58 crc kubenswrapper[4967]: E1121 15:54:58.248827 4967 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 21 15:54:58 crc kubenswrapper[4967]: E1121 15:54:58.249001 4967 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 21 15:54:58 crc kubenswrapper[4967]: E1121 15:54:58.249072 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift podName:4bf5cb6a-c8f8-43c3-b546-282bfd3244e2 nodeName:}" failed. No retries permitted until 2025-11-21 15:55:14.249051433 +0000 UTC m=+1202.507572441 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift") pod "swift-storage-0" (UID: "4bf5cb6a-c8f8-43c3-b546-282bfd3244e2") : configmap "swift-ring-files" not found Nov 21 15:54:58 crc kubenswrapper[4967]: I1121 15:54:58.555730 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="95ecc930-ddf3-4ced-a9e8-ade44ada5666" path="/var/lib/kubelet/pods/95ecc930-ddf3-4ced-a9e8-ade44ada5666/volumes" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.343538 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-5c2pr" podUID="e04788f9-f223-46ef-b96b-24e05c5d911f" containerName="ovn-controller" probeResult="failure" output=< Nov 21 15:54:59 crc kubenswrapper[4967]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 21 15:54:59 crc kubenswrapper[4967]: > Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.385545 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-n8r27" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.387179 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-n8r27" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.745467 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-5c2pr-config-p5bsc"] Nov 21 15:54:59 crc kubenswrapper[4967]: E1121 15:54:59.746141 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95ecc930-ddf3-4ced-a9e8-ade44ada5666" containerName="init" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.746159 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="95ecc930-ddf3-4ced-a9e8-ade44ada5666" containerName="init" Nov 21 15:54:59 crc kubenswrapper[4967]: E1121 15:54:59.746185 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c78ece6b-57df-4cf9-add7-6c8ca602d7a8" containerName="mariadb-database-create" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.746192 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="c78ece6b-57df-4cf9-add7-6c8ca602d7a8" containerName="mariadb-database-create" Nov 21 15:54:59 crc kubenswrapper[4967]: E1121 15:54:59.746204 4967 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="95ecc930-ddf3-4ced-a9e8-ade44ada5666" containerName="dnsmasq-dns" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.746210 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="95ecc930-ddf3-4ced-a9e8-ade44ada5666" containerName="dnsmasq-dns" Nov 21 15:54:59 crc kubenswrapper[4967]: E1121 15:54:59.746227 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7775b65a-c6fb-4eda-9006-182b889c4a0b" containerName="mariadb-account-create" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.746233 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="7775b65a-c6fb-4eda-9006-182b889c4a0b" containerName="mariadb-account-create" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.746423 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="95ecc930-ddf3-4ced-a9e8-ade44ada5666" containerName="dnsmasq-dns" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.746441 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="c78ece6b-57df-4cf9-add7-6c8ca602d7a8" containerName="mariadb-database-create" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.746455 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="7775b65a-c6fb-4eda-9006-182b889c4a0b" containerName="mariadb-account-create" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.747196 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.754982 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5c2pr-config-p5bsc"] Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.761513 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.791866 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1e142d01-de55-4533-9173-25afe8e1b576-var-run-ovn\") pod \"ovn-controller-5c2pr-config-p5bsc\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.791966 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1e142d01-de55-4533-9173-25afe8e1b576-var-log-ovn\") pod \"ovn-controller-5c2pr-config-p5bsc\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.792010 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwgxn\" (UniqueName: \"kubernetes.io/projected/1e142d01-de55-4533-9173-25afe8e1b576-kube-api-access-kwgxn\") pod \"ovn-controller-5c2pr-config-p5bsc\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.792049 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1e142d01-de55-4533-9173-25afe8e1b576-var-run\") pod \"ovn-controller-5c2pr-config-p5bsc\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:54:59 crc 
kubenswrapper[4967]: I1121 15:54:59.792118 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1e142d01-de55-4533-9173-25afe8e1b576-scripts\") pod \"ovn-controller-5c2pr-config-p5bsc\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.792169 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1e142d01-de55-4533-9173-25afe8e1b576-additional-scripts\") pod \"ovn-controller-5c2pr-config-p5bsc\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.895581 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1e142d01-de55-4533-9173-25afe8e1b576-additional-scripts\") pod \"ovn-controller-5c2pr-config-p5bsc\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.896341 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1e142d01-de55-4533-9173-25afe8e1b576-var-run-ovn\") pod \"ovn-controller-5c2pr-config-p5bsc\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.896616 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1e142d01-de55-4533-9173-25afe8e1b576-var-log-ovn\") pod \"ovn-controller-5c2pr-config-p5bsc\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.896771 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwgxn\" (UniqueName: \"kubernetes.io/projected/1e142d01-de55-4533-9173-25afe8e1b576-kube-api-access-kwgxn\") pod \"ovn-controller-5c2pr-config-p5bsc\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.896934 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1e142d01-de55-4533-9173-25afe8e1b576-var-run\") pod \"ovn-controller-5c2pr-config-p5bsc\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.897085 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1e142d01-de55-4533-9173-25afe8e1b576-scripts\") pod \"ovn-controller-5c2pr-config-p5bsc\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.896644 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1e142d01-de55-4533-9173-25afe8e1b576-var-run-ovn\") pod \"ovn-controller-5c2pr-config-p5bsc\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " 
pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.896689 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1e142d01-de55-4533-9173-25afe8e1b576-var-log-ovn\") pod \"ovn-controller-5c2pr-config-p5bsc\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.897442 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1e142d01-de55-4533-9173-25afe8e1b576-var-run\") pod \"ovn-controller-5c2pr-config-p5bsc\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.896832 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1e142d01-de55-4533-9173-25afe8e1b576-additional-scripts\") pod \"ovn-controller-5c2pr-config-p5bsc\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.899263 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1e142d01-de55-4533-9173-25afe8e1b576-scripts\") pod \"ovn-controller-5c2pr-config-p5bsc\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:54:59 crc kubenswrapper[4967]: I1121 15:54:59.917923 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwgxn\" (UniqueName: \"kubernetes.io/projected/1e142d01-de55-4533-9173-25afe8e1b576-kube-api-access-kwgxn\") pod \"ovn-controller-5c2pr-config-p5bsc\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:55:00 crc kubenswrapper[4967]: I1121 15:55:00.116018 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:55:00 crc kubenswrapper[4967]: I1121 15:55:00.421733 4967 generic.go:334] "Generic (PLEG): container finished" podID="b1974654-371e-49f8-b8d3-701e31f82b54" containerID="34fc424c307f993395b5a5d9da7f0d627e0fd3f5c18319490a88b396e64995a3" exitCode=0 Nov 21 15:55:00 crc kubenswrapper[4967]: I1121 15:55:00.421997 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-bf2mc" event={"ID":"b1974654-371e-49f8-b8d3-701e31f82b54","Type":"ContainerDied","Data":"34fc424c307f993395b5a5d9da7f0d627e0fd3f5c18319490a88b396e64995a3"} Nov 21 15:55:00 crc kubenswrapper[4967]: I1121 15:55:00.584056 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-5c2pr-config-p5bsc"] Nov 21 15:55:00 crc kubenswrapper[4967]: W1121 15:55:00.585936 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1e142d01_de55_4533_9173_25afe8e1b576.slice/crio-6ab03b80ea6fc0c153d373e72bb8ac31f64c797bd9bf4d3447aa6b55da296601 WatchSource:0}: Error finding container 6ab03b80ea6fc0c153d373e72bb8ac31f64c797bd9bf4d3447aa6b55da296601: Status 404 returned error can't find the container with id 6ab03b80ea6fc0c153d373e72bb8ac31f64c797bd9bf4d3447aa6b55da296601 Nov 21 15:55:01 crc kubenswrapper[4967]: I1121 15:55:01.434409 4967 generic.go:334] "Generic (PLEG): container finished" podID="1e142d01-de55-4533-9173-25afe8e1b576" containerID="f1740d83b1d7b6bd9ceb629dfc0ce7360522feb97d85fc0b3f71e0479c90ecb9" exitCode=0 Nov 21 15:55:01 crc kubenswrapper[4967]: I1121 15:55:01.434605 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5c2pr-config-p5bsc" event={"ID":"1e142d01-de55-4533-9173-25afe8e1b576","Type":"ContainerDied","Data":"f1740d83b1d7b6bd9ceb629dfc0ce7360522feb97d85fc0b3f71e0479c90ecb9"} Nov 21 15:55:01 crc kubenswrapper[4967]: I1121 15:55:01.437026 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5c2pr-config-p5bsc" event={"ID":"1e142d01-de55-4533-9173-25afe8e1b576","Type":"ContainerStarted","Data":"6ab03b80ea6fc0c153d373e72bb8ac31f64c797bd9bf4d3447aa6b55da296601"} Nov 21 15:55:01 crc kubenswrapper[4967]: I1121 15:55:01.645823 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Nov 21 15:55:01 crc kubenswrapper[4967]: I1121 15:55:01.647212 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Nov 21 15:55:01 crc kubenswrapper[4967]: I1121 15:55:01.653826 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Nov 21 15:55:01 crc kubenswrapper[4967]: I1121 15:55:01.654011 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 21 15:55:01 crc kubenswrapper[4967]: I1121 15:55:01.735077 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58555aef-0397-4247-be17-7efcbbb36fca-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"58555aef-0397-4247-be17-7efcbbb36fca\") " pod="openstack/mysqld-exporter-0" Nov 21 15:55:01 crc kubenswrapper[4967]: I1121 15:55:01.735185 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9gqt\" (UniqueName: \"kubernetes.io/projected/58555aef-0397-4247-be17-7efcbbb36fca-kube-api-access-d9gqt\") pod \"mysqld-exporter-0\" (UID: \"58555aef-0397-4247-be17-7efcbbb36fca\") " pod="openstack/mysqld-exporter-0" Nov 21 15:55:01 crc kubenswrapper[4967]: I1121 15:55:01.735229 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58555aef-0397-4247-be17-7efcbbb36fca-config-data\") pod \"mysqld-exporter-0\" (UID: \"58555aef-0397-4247-be17-7efcbbb36fca\") " pod="openstack/mysqld-exporter-0" Nov 21 15:55:01 crc kubenswrapper[4967]: I1121 15:55:01.836672 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58555aef-0397-4247-be17-7efcbbb36fca-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"58555aef-0397-4247-be17-7efcbbb36fca\") " pod="openstack/mysqld-exporter-0" Nov 21 15:55:01 crc kubenswrapper[4967]: I1121 15:55:01.836775 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9gqt\" (UniqueName: \"kubernetes.io/projected/58555aef-0397-4247-be17-7efcbbb36fca-kube-api-access-d9gqt\") pod \"mysqld-exporter-0\" (UID: \"58555aef-0397-4247-be17-7efcbbb36fca\") " pod="openstack/mysqld-exporter-0" Nov 21 15:55:01 crc kubenswrapper[4967]: I1121 15:55:01.836816 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58555aef-0397-4247-be17-7efcbbb36fca-config-data\") pod \"mysqld-exporter-0\" (UID: \"58555aef-0397-4247-be17-7efcbbb36fca\") " pod="openstack/mysqld-exporter-0" Nov 21 15:55:01 crc kubenswrapper[4967]: I1121 15:55:01.842760 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58555aef-0397-4247-be17-7efcbbb36fca-config-data\") pod \"mysqld-exporter-0\" (UID: \"58555aef-0397-4247-be17-7efcbbb36fca\") " pod="openstack/mysqld-exporter-0" Nov 21 15:55:01 crc kubenswrapper[4967]: I1121 15:55:01.844694 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58555aef-0397-4247-be17-7efcbbb36fca-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"58555aef-0397-4247-be17-7efcbbb36fca\") " pod="openstack/mysqld-exporter-0" Nov 21 15:55:01 crc kubenswrapper[4967]: I1121 15:55:01.853244 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9gqt\" (UniqueName: 
\"kubernetes.io/projected/58555aef-0397-4247-be17-7efcbbb36fca-kube-api-access-d9gqt\") pod \"mysqld-exporter-0\" (UID: \"58555aef-0397-4247-be17-7efcbbb36fca\") " pod="openstack/mysqld-exporter-0" Nov 21 15:55:01 crc kubenswrapper[4967]: I1121 15:55:01.991262 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Nov 21 15:55:02 crc kubenswrapper[4967]: I1121 15:55:02.291323 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:02 crc kubenswrapper[4967]: I1121 15:55:02.295734 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:02 crc kubenswrapper[4967]: I1121 15:55:02.472689 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:04 crc kubenswrapper[4967]: I1121 15:55:04.344895 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-5c2pr" Nov 21 15:55:04 crc kubenswrapper[4967]: I1121 15:55:04.958068 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.004743 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.005003 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" containerName="prometheus" containerID="cri-o://3e971a9fb150a0db0857418622c0514aa345a0c015fa52a0c77ac69a937c3d2a" gracePeriod=600 Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.005095 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" containerName="config-reloader" containerID="cri-o://1686cfe04a85b0ebec1262d0d343e4110b3c2ae05cad2d59f05b8353373c9ac4" gracePeriod=600 Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.005095 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" containerName="thanos-sidecar" containerID="cri-o://eb63434d7c67ec3122db53c7949f3b3b6264731cf0d81f7c31da6df51ce6ffdb" gracePeriod=600 Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.434689 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-twljx"] Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.436447 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-twljx" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.450996 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-twljx"] Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.511348 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-fca9-account-create-2qmtv"] Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.513158 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-fca9-account-create-2qmtv" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.518741 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.520109 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c5930c6-1d89-4ef4-bd96-f290177d2aff-operator-scripts\") pod \"heat-db-create-twljx\" (UID: \"9c5930c6-1d89-4ef4-bd96-f290177d2aff\") " pod="openstack/heat-db-create-twljx" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.520166 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqtcx\" (UniqueName: \"kubernetes.io/projected/9c5930c6-1d89-4ef4-bd96-f290177d2aff-kube-api-access-rqtcx\") pod \"heat-db-create-twljx\" (UID: \"9c5930c6-1d89-4ef4-bd96-f290177d2aff\") " pod="openstack/heat-db-create-twljx" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.526807 4967 generic.go:334] "Generic (PLEG): container finished" podID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" containerID="eb63434d7c67ec3122db53c7949f3b3b6264731cf0d81f7c31da6df51ce6ffdb" exitCode=0 Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.526840 4967 generic.go:334] "Generic (PLEG): container finished" podID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" containerID="1686cfe04a85b0ebec1262d0d343e4110b3c2ae05cad2d59f05b8353373c9ac4" exitCode=0 Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.526849 4967 generic.go:334] "Generic (PLEG): container finished" podID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" containerID="3e971a9fb150a0db0857418622c0514aa345a0c015fa52a0c77ac69a937c3d2a" exitCode=0 Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.526869 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"d44bd64f-3c97-4cd8-be5e-2cabe45480a0","Type":"ContainerDied","Data":"eb63434d7c67ec3122db53c7949f3b3b6264731cf0d81f7c31da6df51ce6ffdb"} Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.526928 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"d44bd64f-3c97-4cd8-be5e-2cabe45480a0","Type":"ContainerDied","Data":"1686cfe04a85b0ebec1262d0d343e4110b3c2ae05cad2d59f05b8353373c9ac4"} Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.526951 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"d44bd64f-3c97-4cd8-be5e-2cabe45480a0","Type":"ContainerDied","Data":"3e971a9fb150a0db0857418622c0514aa345a0c015fa52a0c77ac69a937c3d2a"} Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.532350 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-fca9-account-create-2qmtv"] Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.622692 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3813cbde-1076-466c-b72a-94ffa3741ef1-operator-scripts\") pod \"heat-fca9-account-create-2qmtv\" (UID: \"3813cbde-1076-466c-b72a-94ffa3741ef1\") " pod="openstack/heat-fca9-account-create-2qmtv" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.623209 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gq8jm\" (UniqueName: 
\"kubernetes.io/projected/3813cbde-1076-466c-b72a-94ffa3741ef1-kube-api-access-gq8jm\") pod \"heat-fca9-account-create-2qmtv\" (UID: \"3813cbde-1076-466c-b72a-94ffa3741ef1\") " pod="openstack/heat-fca9-account-create-2qmtv" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.623279 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c5930c6-1d89-4ef4-bd96-f290177d2aff-operator-scripts\") pod \"heat-db-create-twljx\" (UID: \"9c5930c6-1d89-4ef4-bd96-f290177d2aff\") " pod="openstack/heat-db-create-twljx" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.623434 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqtcx\" (UniqueName: \"kubernetes.io/projected/9c5930c6-1d89-4ef4-bd96-f290177d2aff-kube-api-access-rqtcx\") pod \"heat-db-create-twljx\" (UID: \"9c5930c6-1d89-4ef4-bd96-f290177d2aff\") " pod="openstack/heat-db-create-twljx" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.696377 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-gkkl8"] Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.698091 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-gkkl8" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.699781 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.700452 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.701291 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.703458 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-lbgzv" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.711499 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-gkkl8"] Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.725237 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3813cbde-1076-466c-b72a-94ffa3741ef1-operator-scripts\") pod \"heat-fca9-account-create-2qmtv\" (UID: \"3813cbde-1076-466c-b72a-94ffa3741ef1\") " pod="openstack/heat-fca9-account-create-2qmtv" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.725382 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gq8jm\" (UniqueName: \"kubernetes.io/projected/3813cbde-1076-466c-b72a-94ffa3741ef1-kube-api-access-gq8jm\") pod \"heat-fca9-account-create-2qmtv\" (UID: \"3813cbde-1076-466c-b72a-94ffa3741ef1\") " pod="openstack/heat-fca9-account-create-2qmtv" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.733547 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-e255-account-create-t44dj"] Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.736009 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-e255-account-create-t44dj" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.738724 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.748458 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-msr6l"] Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.760160 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-msr6l" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.768381 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-e255-account-create-t44dj"] Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.784158 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c5930c6-1d89-4ef4-bd96-f290177d2aff-operator-scripts\") pod \"heat-db-create-twljx\" (UID: \"9c5930c6-1d89-4ef4-bd96-f290177d2aff\") " pod="openstack/heat-db-create-twljx" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.809600 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqtcx\" (UniqueName: \"kubernetes.io/projected/9c5930c6-1d89-4ef4-bd96-f290177d2aff-kube-api-access-rqtcx\") pod \"heat-db-create-twljx\" (UID: \"9c5930c6-1d89-4ef4-bd96-f290177d2aff\") " pod="openstack/heat-db-create-twljx" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.853595 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dab33d9-b2f0-4884-97ba-047b7772da9a-combined-ca-bundle\") pod \"keystone-db-sync-gkkl8\" (UID: \"0dab33d9-b2f0-4884-97ba-047b7772da9a\") " pod="openstack/keystone-db-sync-gkkl8" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.853663 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dab33d9-b2f0-4884-97ba-047b7772da9a-config-data\") pod \"keystone-db-sync-gkkl8\" (UID: \"0dab33d9-b2f0-4884-97ba-047b7772da9a\") " pod="openstack/keystone-db-sync-gkkl8" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.853801 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1-operator-scripts\") pod \"cinder-e255-account-create-t44dj\" (UID: \"5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1\") " pod="openstack/cinder-e255-account-create-t44dj" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.853888 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64czj\" (UniqueName: \"kubernetes.io/projected/5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1-kube-api-access-64czj\") pod \"cinder-e255-account-create-t44dj\" (UID: \"5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1\") " pod="openstack/cinder-e255-account-create-t44dj" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.853940 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dkd6\" (UniqueName: \"kubernetes.io/projected/0dab33d9-b2f0-4884-97ba-047b7772da9a-kube-api-access-4dkd6\") pod \"keystone-db-sync-gkkl8\" (UID: \"0dab33d9-b2f0-4884-97ba-047b7772da9a\") " pod="openstack/keystone-db-sync-gkkl8" Nov 21 15:55:05 crc 
kubenswrapper[4967]: I1121 15:55:05.876377 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-msr6l"] Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.881020 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3813cbde-1076-466c-b72a-94ffa3741ef1-operator-scripts\") pod \"heat-fca9-account-create-2qmtv\" (UID: \"3813cbde-1076-466c-b72a-94ffa3741ef1\") " pod="openstack/heat-fca9-account-create-2qmtv" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.894435 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gq8jm\" (UniqueName: \"kubernetes.io/projected/3813cbde-1076-466c-b72a-94ffa3741ef1-kube-api-access-gq8jm\") pod \"heat-fca9-account-create-2qmtv\" (UID: \"3813cbde-1076-466c-b72a-94ffa3741ef1\") " pod="openstack/heat-fca9-account-create-2qmtv" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.957731 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a098c7dd-1c69-4c44-a9cb-65c81b00e9e1-operator-scripts\") pod \"cinder-db-create-msr6l\" (UID: \"a098c7dd-1c69-4c44-a9cb-65c81b00e9e1\") " pod="openstack/cinder-db-create-msr6l" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.957820 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dab33d9-b2f0-4884-97ba-047b7772da9a-combined-ca-bundle\") pod \"keystone-db-sync-gkkl8\" (UID: \"0dab33d9-b2f0-4884-97ba-047b7772da9a\") " pod="openstack/keystone-db-sync-gkkl8" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.957840 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dab33d9-b2f0-4884-97ba-047b7772da9a-config-data\") pod \"keystone-db-sync-gkkl8\" (UID: \"0dab33d9-b2f0-4884-97ba-047b7772da9a\") " pod="openstack/keystone-db-sync-gkkl8" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.957880 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1-operator-scripts\") pod \"cinder-e255-account-create-t44dj\" (UID: \"5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1\") " pod="openstack/cinder-e255-account-create-t44dj" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.957919 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64czj\" (UniqueName: \"kubernetes.io/projected/5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1-kube-api-access-64czj\") pod \"cinder-e255-account-create-t44dj\" (UID: \"5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1\") " pod="openstack/cinder-e255-account-create-t44dj" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.957943 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dkd6\" (UniqueName: \"kubernetes.io/projected/0dab33d9-b2f0-4884-97ba-047b7772da9a-kube-api-access-4dkd6\") pod \"keystone-db-sync-gkkl8\" (UID: \"0dab33d9-b2f0-4884-97ba-047b7772da9a\") " pod="openstack/keystone-db-sync-gkkl8" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.957990 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j868b\" (UniqueName: 
\"kubernetes.io/projected/a098c7dd-1c69-4c44-a9cb-65c81b00e9e1-kube-api-access-j868b\") pod \"cinder-db-create-msr6l\" (UID: \"a098c7dd-1c69-4c44-a9cb-65c81b00e9e1\") " pod="openstack/cinder-db-create-msr6l" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.958872 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1-operator-scripts\") pod \"cinder-e255-account-create-t44dj\" (UID: \"5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1\") " pod="openstack/cinder-e255-account-create-t44dj" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.962150 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dab33d9-b2f0-4884-97ba-047b7772da9a-config-data\") pod \"keystone-db-sync-gkkl8\" (UID: \"0dab33d9-b2f0-4884-97ba-047b7772da9a\") " pod="openstack/keystone-db-sync-gkkl8" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.962737 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dab33d9-b2f0-4884-97ba-047b7772da9a-combined-ca-bundle\") pod \"keystone-db-sync-gkkl8\" (UID: \"0dab33d9-b2f0-4884-97ba-047b7772da9a\") " pod="openstack/keystone-db-sync-gkkl8" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.976872 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64czj\" (UniqueName: \"kubernetes.io/projected/5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1-kube-api-access-64czj\") pod \"cinder-e255-account-create-t44dj\" (UID: \"5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1\") " pod="openstack/cinder-e255-account-create-t44dj" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.980752 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-d7rdb"] Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.982372 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-d7rdb" Nov 21 15:55:05 crc kubenswrapper[4967]: I1121 15:55:05.991797 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dkd6\" (UniqueName: \"kubernetes.io/projected/0dab33d9-b2f0-4884-97ba-047b7772da9a-kube-api-access-4dkd6\") pod \"keystone-db-sync-gkkl8\" (UID: \"0dab33d9-b2f0-4884-97ba-047b7772da9a\") " pod="openstack/keystone-db-sync-gkkl8" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.007455 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-1a58-account-create-j6kfx"] Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.009251 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1a58-account-create-j6kfx" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.010865 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.037568 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-d7rdb"] Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.048060 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-1a58-account-create-j6kfx"] Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.053331 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-create-twljx" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.056225 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-r44s6"] Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.058052 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-r44s6" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.059885 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j868b\" (UniqueName: \"kubernetes.io/projected/a098c7dd-1c69-4c44-a9cb-65c81b00e9e1-kube-api-access-j868b\") pod \"cinder-db-create-msr6l\" (UID: \"a098c7dd-1c69-4c44-a9cb-65c81b00e9e1\") " pod="openstack/cinder-db-create-msr6l" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.059981 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6e85c34-bd76-47aa-b120-3410545d53f0-operator-scripts\") pod \"barbican-db-create-d7rdb\" (UID: \"b6e85c34-bd76-47aa-b120-3410545d53f0\") " pod="openstack/barbican-db-create-d7rdb" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.060018 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a098c7dd-1c69-4c44-a9cb-65c81b00e9e1-operator-scripts\") pod \"cinder-db-create-msr6l\" (UID: \"a098c7dd-1c69-4c44-a9cb-65c81b00e9e1\") " pod="openstack/cinder-db-create-msr6l" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.060110 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcgm7\" (UniqueName: \"kubernetes.io/projected/b6e85c34-bd76-47aa-b120-3410545d53f0-kube-api-access-bcgm7\") pod \"barbican-db-create-d7rdb\" (UID: \"b6e85c34-bd76-47aa-b120-3410545d53f0\") " pod="openstack/barbican-db-create-d7rdb" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.065832 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a098c7dd-1c69-4c44-a9cb-65c81b00e9e1-operator-scripts\") pod \"cinder-db-create-msr6l\" (UID: \"a098c7dd-1c69-4c44-a9cb-65c81b00e9e1\") " pod="openstack/cinder-db-create-msr6l" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.067017 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-r44s6"] Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.095084 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j868b\" (UniqueName: \"kubernetes.io/projected/a098c7dd-1c69-4c44-a9cb-65c81b00e9e1-kube-api-access-j868b\") pod \"cinder-db-create-msr6l\" (UID: \"a098c7dd-1c69-4c44-a9cb-65c81b00e9e1\") " pod="openstack/cinder-db-create-msr6l" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.102830 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5922-account-create-sq5cw"] Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.103454 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-gkkl8" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.104959 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5922-account-create-sq5cw" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.109988 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.136924 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-fca9-account-create-2qmtv" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.138412 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5922-account-create-sq5cw"] Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.140658 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-e255-account-create-t44dj" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.144021 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-msr6l" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.161578 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssxfl\" (UniqueName: \"kubernetes.io/projected/62996c4f-2e2b-4e94-b60d-1c2962cd7e3f-kube-api-access-ssxfl\") pod \"barbican-1a58-account-create-j6kfx\" (UID: \"62996c4f-2e2b-4e94-b60d-1c2962cd7e3f\") " pod="openstack/barbican-1a58-account-create-j6kfx" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.161640 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62996c4f-2e2b-4e94-b60d-1c2962cd7e3f-operator-scripts\") pod \"barbican-1a58-account-create-j6kfx\" (UID: \"62996c4f-2e2b-4e94-b60d-1c2962cd7e3f\") " pod="openstack/barbican-1a58-account-create-j6kfx" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.161720 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6e85c34-bd76-47aa-b120-3410545d53f0-operator-scripts\") pod \"barbican-db-create-d7rdb\" (UID: \"b6e85c34-bd76-47aa-b120-3410545d53f0\") " pod="openstack/barbican-db-create-d7rdb" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.161816 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcgm7\" (UniqueName: \"kubernetes.io/projected/b6e85c34-bd76-47aa-b120-3410545d53f0-kube-api-access-bcgm7\") pod \"barbican-db-create-d7rdb\" (UID: \"b6e85c34-bd76-47aa-b120-3410545d53f0\") " pod="openstack/barbican-db-create-d7rdb" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.161847 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flpg8\" (UniqueName: \"kubernetes.io/projected/ab58e136-893e-4b70-a0a3-d259b234dfcc-kube-api-access-flpg8\") pod \"neutron-db-create-r44s6\" (UID: \"ab58e136-893e-4b70-a0a3-d259b234dfcc\") " pod="openstack/neutron-db-create-r44s6" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.161882 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab58e136-893e-4b70-a0a3-d259b234dfcc-operator-scripts\") pod \"neutron-db-create-r44s6\" (UID: \"ab58e136-893e-4b70-a0a3-d259b234dfcc\") " pod="openstack/neutron-db-create-r44s6" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.162760 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6e85c34-bd76-47aa-b120-3410545d53f0-operator-scripts\") pod \"barbican-db-create-d7rdb\" (UID: \"b6e85c34-bd76-47aa-b120-3410545d53f0\") " pod="openstack/barbican-db-create-d7rdb" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.184030 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcgm7\" (UniqueName: \"kubernetes.io/projected/b6e85c34-bd76-47aa-b120-3410545d53f0-kube-api-access-bcgm7\") pod \"barbican-db-create-d7rdb\" (UID: \"b6e85c34-bd76-47aa-b120-3410545d53f0\") " pod="openstack/barbican-db-create-d7rdb" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.263324 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9505ac95-12b0-426a-a2b5-42f13ec2fad8-operator-scripts\") pod \"neutron-5922-account-create-sq5cw\" (UID: \"9505ac95-12b0-426a-a2b5-42f13ec2fad8\") " pod="openstack/neutron-5922-account-create-sq5cw" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.263415 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssxfl\" (UniqueName: \"kubernetes.io/projected/62996c4f-2e2b-4e94-b60d-1c2962cd7e3f-kube-api-access-ssxfl\") pod \"barbican-1a58-account-create-j6kfx\" (UID: \"62996c4f-2e2b-4e94-b60d-1c2962cd7e3f\") " pod="openstack/barbican-1a58-account-create-j6kfx" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.263449 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w54mt\" (UniqueName: \"kubernetes.io/projected/9505ac95-12b0-426a-a2b5-42f13ec2fad8-kube-api-access-w54mt\") pod \"neutron-5922-account-create-sq5cw\" (UID: \"9505ac95-12b0-426a-a2b5-42f13ec2fad8\") " pod="openstack/neutron-5922-account-create-sq5cw" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.263491 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62996c4f-2e2b-4e94-b60d-1c2962cd7e3f-operator-scripts\") pod \"barbican-1a58-account-create-j6kfx\" (UID: \"62996c4f-2e2b-4e94-b60d-1c2962cd7e3f\") " pod="openstack/barbican-1a58-account-create-j6kfx" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.263641 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flpg8\" (UniqueName: \"kubernetes.io/projected/ab58e136-893e-4b70-a0a3-d259b234dfcc-kube-api-access-flpg8\") pod \"neutron-db-create-r44s6\" (UID: \"ab58e136-893e-4b70-a0a3-d259b234dfcc\") " pod="openstack/neutron-db-create-r44s6" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.263718 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab58e136-893e-4b70-a0a3-d259b234dfcc-operator-scripts\") pod \"neutron-db-create-r44s6\" (UID: \"ab58e136-893e-4b70-a0a3-d259b234dfcc\") " pod="openstack/neutron-db-create-r44s6" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.264729 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab58e136-893e-4b70-a0a3-d259b234dfcc-operator-scripts\") pod \"neutron-db-create-r44s6\" (UID: \"ab58e136-893e-4b70-a0a3-d259b234dfcc\") " pod="openstack/neutron-db-create-r44s6" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.264787 4967 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62996c4f-2e2b-4e94-b60d-1c2962cd7e3f-operator-scripts\") pod \"barbican-1a58-account-create-j6kfx\" (UID: \"62996c4f-2e2b-4e94-b60d-1c2962cd7e3f\") " pod="openstack/barbican-1a58-account-create-j6kfx" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.281729 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flpg8\" (UniqueName: \"kubernetes.io/projected/ab58e136-893e-4b70-a0a3-d259b234dfcc-kube-api-access-flpg8\") pod \"neutron-db-create-r44s6\" (UID: \"ab58e136-893e-4b70-a0a3-d259b234dfcc\") " pod="openstack/neutron-db-create-r44s6" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.283615 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssxfl\" (UniqueName: \"kubernetes.io/projected/62996c4f-2e2b-4e94-b60d-1c2962cd7e3f-kube-api-access-ssxfl\") pod \"barbican-1a58-account-create-j6kfx\" (UID: \"62996c4f-2e2b-4e94-b60d-1c2962cd7e3f\") " pod="openstack/barbican-1a58-account-create-j6kfx" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.365563 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9505ac95-12b0-426a-a2b5-42f13ec2fad8-operator-scripts\") pod \"neutron-5922-account-create-sq5cw\" (UID: \"9505ac95-12b0-426a-a2b5-42f13ec2fad8\") " pod="openstack/neutron-5922-account-create-sq5cw" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.365626 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w54mt\" (UniqueName: \"kubernetes.io/projected/9505ac95-12b0-426a-a2b5-42f13ec2fad8-kube-api-access-w54mt\") pod \"neutron-5922-account-create-sq5cw\" (UID: \"9505ac95-12b0-426a-a2b5-42f13ec2fad8\") " pod="openstack/neutron-5922-account-create-sq5cw" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.366669 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9505ac95-12b0-426a-a2b5-42f13ec2fad8-operator-scripts\") pod \"neutron-5922-account-create-sq5cw\" (UID: \"9505ac95-12b0-426a-a2b5-42f13ec2fad8\") " pod="openstack/neutron-5922-account-create-sq5cw" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.384577 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w54mt\" (UniqueName: \"kubernetes.io/projected/9505ac95-12b0-426a-a2b5-42f13ec2fad8-kube-api-access-w54mt\") pod \"neutron-5922-account-create-sq5cw\" (UID: \"9505ac95-12b0-426a-a2b5-42f13ec2fad8\") " pod="openstack/neutron-5922-account-create-sq5cw" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.395726 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-d7rdb" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.446820 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1a58-account-create-j6kfx" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.457716 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-r44s6" Nov 21 15:55:06 crc kubenswrapper[4967]: I1121 15:55:06.469190 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5922-account-create-sq5cw" Nov 21 15:55:07 crc kubenswrapper[4967]: I1121 15:55:07.292697 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" containerName="prometheus" probeResult="failure" output="Get \"http://10.217.0.138:9090/-/ready\": dial tcp 10.217.0.138:9090: connect: connection refused" Nov 21 15:55:08 crc kubenswrapper[4967]: I1121 15:55:08.561620 4967 generic.go:334] "Generic (PLEG): container finished" podID="d96c12a3-6ce4-40f6-a655-0881d711f9fa" containerID="80b38e9eacd01d13b36f4476fb0cc8a6b6b79cf32296b5aa4a7ba23654c8c79c" exitCode=0 Nov 21 15:55:08 crc kubenswrapper[4967]: I1121 15:55:08.561701 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"d96c12a3-6ce4-40f6-a655-0881d711f9fa","Type":"ContainerDied","Data":"80b38e9eacd01d13b36f4476fb0cc8a6b6b79cf32296b5aa4a7ba23654c8c79c"} Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.521155 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.523612 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-bf2mc" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.579690 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-bf2mc" event={"ID":"b1974654-371e-49f8-b8d3-701e31f82b54","Type":"ContainerDied","Data":"e0c573949eaf9559589983b28f8a468281be5f602e90170995c432bde32dff2d"} Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.580379 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e0c573949eaf9559589983b28f8a468281be5f602e90170995c432bde32dff2d" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.579702 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-bf2mc" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.584700 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-5c2pr-config-p5bsc" event={"ID":"1e142d01-de55-4533-9173-25afe8e1b576","Type":"ContainerDied","Data":"6ab03b80ea6fc0c153d373e72bb8ac31f64c797bd9bf4d3447aa6b55da296601"} Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.584751 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6ab03b80ea6fc0c153d373e72bb8ac31f64c797bd9bf4d3447aa6b55da296601" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.584821 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-5c2pr-config-p5bsc" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.651506 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1e142d01-de55-4533-9173-25afe8e1b576-var-run\") pod \"1e142d01-de55-4533-9173-25afe8e1b576\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.651611 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1e142d01-de55-4533-9173-25afe8e1b576-var-run-ovn\") pod \"1e142d01-de55-4533-9173-25afe8e1b576\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.651601 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1e142d01-de55-4533-9173-25afe8e1b576-var-run" (OuterVolumeSpecName: "var-run") pod "1e142d01-de55-4533-9173-25afe8e1b576" (UID: "1e142d01-de55-4533-9173-25afe8e1b576"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.651648 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b1974654-371e-49f8-b8d3-701e31f82b54-scripts\") pod \"b1974654-371e-49f8-b8d3-701e31f82b54\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.651656 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1e142d01-de55-4533-9173-25afe8e1b576-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "1e142d01-de55-4533-9173-25afe8e1b576" (UID: "1e142d01-de55-4533-9173-25afe8e1b576"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.651670 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b1974654-371e-49f8-b8d3-701e31f82b54-dispersionconf\") pod \"b1974654-371e-49f8-b8d3-701e31f82b54\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.651690 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kwgxn\" (UniqueName: \"kubernetes.io/projected/1e142d01-de55-4533-9173-25afe8e1b576-kube-api-access-kwgxn\") pod \"1e142d01-de55-4533-9173-25afe8e1b576\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.651727 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1e142d01-de55-4533-9173-25afe8e1b576-scripts\") pod \"1e142d01-de55-4533-9173-25afe8e1b576\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.651788 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d8m9r\" (UniqueName: \"kubernetes.io/projected/b1974654-371e-49f8-b8d3-701e31f82b54-kube-api-access-d8m9r\") pod \"b1974654-371e-49f8-b8d3-701e31f82b54\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.651831 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b1974654-371e-49f8-b8d3-701e31f82b54-ring-data-devices\") pod \"b1974654-371e-49f8-b8d3-701e31f82b54\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.651858 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1974654-371e-49f8-b8d3-701e31f82b54-combined-ca-bundle\") pod \"b1974654-371e-49f8-b8d3-701e31f82b54\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.651906 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b1974654-371e-49f8-b8d3-701e31f82b54-swiftconf\") pod \"b1974654-371e-49f8-b8d3-701e31f82b54\" (UID: \"b1974654-371e-49f8-b8d3-701e31f82b54\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.651954 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1e142d01-de55-4533-9173-25afe8e1b576-var-log-ovn\") pod \"1e142d01-de55-4533-9173-25afe8e1b576\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.651984 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1e142d01-de55-4533-9173-25afe8e1b576-additional-scripts\") pod \"1e142d01-de55-4533-9173-25afe8e1b576\" (UID: \"1e142d01-de55-4533-9173-25afe8e1b576\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.652044 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b1974654-371e-49f8-b8d3-701e31f82b54-etc-swift\") pod \"b1974654-371e-49f8-b8d3-701e31f82b54\" (UID: 
\"b1974654-371e-49f8-b8d3-701e31f82b54\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.652534 4967 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1e142d01-de55-4533-9173-25afe8e1b576-var-run\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.652551 4967 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1e142d01-de55-4533-9173-25afe8e1b576-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.653535 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1974654-371e-49f8-b8d3-701e31f82b54-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "b1974654-371e-49f8-b8d3-701e31f82b54" (UID: "b1974654-371e-49f8-b8d3-701e31f82b54"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.654382 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e142d01-de55-4533-9173-25afe8e1b576-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "1e142d01-de55-4533-9173-25afe8e1b576" (UID: "1e142d01-de55-4533-9173-25afe8e1b576"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.654813 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1e142d01-de55-4533-9173-25afe8e1b576-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "1e142d01-de55-4533-9173-25afe8e1b576" (UID: "1e142d01-de55-4533-9173-25afe8e1b576"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.656038 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1e142d01-de55-4533-9173-25afe8e1b576-scripts" (OuterVolumeSpecName: "scripts") pod "1e142d01-de55-4533-9173-25afe8e1b576" (UID: "1e142d01-de55-4533-9173-25afe8e1b576"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.657346 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1974654-371e-49f8-b8d3-701e31f82b54-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "b1974654-371e-49f8-b8d3-701e31f82b54" (UID: "b1974654-371e-49f8-b8d3-701e31f82b54"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.667493 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1974654-371e-49f8-b8d3-701e31f82b54-kube-api-access-d8m9r" (OuterVolumeSpecName: "kube-api-access-d8m9r") pod "b1974654-371e-49f8-b8d3-701e31f82b54" (UID: "b1974654-371e-49f8-b8d3-701e31f82b54"). InnerVolumeSpecName "kube-api-access-d8m9r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.667605 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e142d01-de55-4533-9173-25afe8e1b576-kube-api-access-kwgxn" (OuterVolumeSpecName: "kube-api-access-kwgxn") pod "1e142d01-de55-4533-9173-25afe8e1b576" (UID: "1e142d01-de55-4533-9173-25afe8e1b576"). InnerVolumeSpecName "kube-api-access-kwgxn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.680274 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1974654-371e-49f8-b8d3-701e31f82b54-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "b1974654-371e-49f8-b8d3-701e31f82b54" (UID: "b1974654-371e-49f8-b8d3-701e31f82b54"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.692500 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1974654-371e-49f8-b8d3-701e31f82b54-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b1974654-371e-49f8-b8d3-701e31f82b54" (UID: "b1974654-371e-49f8-b8d3-701e31f82b54"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.709382 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1974654-371e-49f8-b8d3-701e31f82b54-scripts" (OuterVolumeSpecName: "scripts") pod "b1974654-371e-49f8-b8d3-701e31f82b54" (UID: "b1974654-371e-49f8-b8d3-701e31f82b54"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.758851 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b1974654-371e-49f8-b8d3-701e31f82b54-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.758874 4967 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/b1974654-371e-49f8-b8d3-701e31f82b54-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.758885 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kwgxn\" (UniqueName: \"kubernetes.io/projected/1e142d01-de55-4533-9173-25afe8e1b576-kube-api-access-kwgxn\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.758894 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1e142d01-de55-4533-9173-25afe8e1b576-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.758902 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d8m9r\" (UniqueName: \"kubernetes.io/projected/b1974654-371e-49f8-b8d3-701e31f82b54-kube-api-access-d8m9r\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.758910 4967 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/b1974654-371e-49f8-b8d3-701e31f82b54-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.758921 4967 reconciler_common.go:293] "Volume detached for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1974654-371e-49f8-b8d3-701e31f82b54-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.758929 4967 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1e142d01-de55-4533-9173-25afe8e1b576-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.758937 4967 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1e142d01-de55-4533-9173-25afe8e1b576-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.758944 4967 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/b1974654-371e-49f8-b8d3-701e31f82b54-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.765814 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1974654-371e-49f8-b8d3-701e31f82b54-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "b1974654-371e-49f8-b8d3-701e31f82b54" (UID: "b1974654-371e-49f8-b8d3-701e31f82b54"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.793135 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.861763 4967 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/b1974654-371e-49f8-b8d3-701e31f82b54-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.963400 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-tls-assets\") pod \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.963538 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-web-config\") pod \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.963590 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-prometheus-metric-storage-rulefiles-0\") pod \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.963749 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9zs6\" (UniqueName: \"kubernetes.io/projected/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-kube-api-access-t9zs6\") pod \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.963868 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-config-out\") pod 
\"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.964003 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\") pod \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.964069 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-thanos-prometheus-http-client-file\") pod \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.964137 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-config\") pod \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\" (UID: \"d44bd64f-3c97-4cd8-be5e-2cabe45480a0\") " Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.975385 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "d44bd64f-3c97-4cd8-be5e-2cabe45480a0" (UID: "d44bd64f-3c97-4cd8-be5e-2cabe45480a0"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.979452 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "d44bd64f-3c97-4cd8-be5e-2cabe45480a0" (UID: "d44bd64f-3c97-4cd8-be5e-2cabe45480a0"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.981632 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-config" (OuterVolumeSpecName: "config") pod "d44bd64f-3c97-4cd8-be5e-2cabe45480a0" (UID: "d44bd64f-3c97-4cd8-be5e-2cabe45480a0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.981731 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-config-out" (OuterVolumeSpecName: "config-out") pod "d44bd64f-3c97-4cd8-be5e-2cabe45480a0" (UID: "d44bd64f-3c97-4cd8-be5e-2cabe45480a0"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:55:09 crc kubenswrapper[4967]: I1121 15:55:09.983462 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "d44bd64f-3c97-4cd8-be5e-2cabe45480a0" (UID: "d44bd64f-3c97-4cd8-be5e-2cabe45480a0"). InnerVolumeSpecName "thanos-prometheus-http-client-file". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.012661 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-web-config" (OuterVolumeSpecName: "web-config") pod "d44bd64f-3c97-4cd8-be5e-2cabe45480a0" (UID: "d44bd64f-3c97-4cd8-be5e-2cabe45480a0"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.012837 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-kube-api-access-t9zs6" (OuterVolumeSpecName: "kube-api-access-t9zs6") pod "d44bd64f-3c97-4cd8-be5e-2cabe45480a0" (UID: "d44bd64f-3c97-4cd8-be5e-2cabe45480a0"). InnerVolumeSpecName "kube-api-access-t9zs6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.013012 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f9cd499-a5be-4050-a347-d7b9097aa028" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "d44bd64f-3c97-4cd8-be5e-2cabe45480a0" (UID: "d44bd64f-3c97-4cd8-be5e-2cabe45480a0"). InnerVolumeSpecName "pvc-6f9cd499-a5be-4050-a347-d7b9097aa028". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.071871 4967 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-config-out\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.071939 4967 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\") on node \"crc\" " Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.071957 4967 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.071971 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.071981 4967 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-tls-assets\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.071991 4967 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-web-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.072002 4967 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.072017 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9zs6\" 
(UniqueName: \"kubernetes.io/projected/d44bd64f-3c97-4cd8-be5e-2cabe45480a0-kube-api-access-t9zs6\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.130808 4967 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.130942 4967 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-6f9cd499-a5be-4050-a347-d7b9097aa028" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f9cd499-a5be-4050-a347-d7b9097aa028") on node "crc" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.174095 4967 reconciler_common.go:293] "Volume detached for volume \"pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.371874 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-fca9-account-create-2qmtv"] Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.609031 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"d96c12a3-6ce4-40f6-a655-0881d711f9fa","Type":"ContainerStarted","Data":"02d79e1d71d3c681e219adebcff3bc382e41de298e14be461f68c747348a0a41"} Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.610377 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.648092 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"d44bd64f-3c97-4cd8-be5e-2cabe45480a0","Type":"ContainerDied","Data":"3bf39713487c2db7e8d15561184c200b35e5192035d5c33ed72a2df5a1cb939b"} Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.648150 4967 scope.go:117] "RemoveContainer" containerID="eb63434d7c67ec3122db53c7949f3b3b6264731cf0d81f7c31da6df51ce6ffdb" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.648347 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.654525 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-fca9-account-create-2qmtv" event={"ID":"3813cbde-1076-466c-b72a-94ffa3741ef1","Type":"ContainerStarted","Data":"5d8c5d4c5ba36dbec17ff7a1568ffc52131a93b45b9f132f158cf0cc67bcdfef"} Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.656768 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=-9223371959.19803 podStartE2EDuration="1m17.656744573s" podCreationTimestamp="2025-11-21 15:53:53 +0000 UTC" firstStartedPulling="2025-11-21 15:53:55.798465518 +0000 UTC m=+1124.056986516" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:55:10.643611276 +0000 UTC m=+1198.902132284" watchObservedRunningTime="2025-11-21 15:55:10.656744573 +0000 UTC m=+1198.915265581" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.700404 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.711863 4967 scope.go:117] "RemoveContainer" containerID="1686cfe04a85b0ebec1262d0d343e4110b3c2ae05cad2d59f05b8353373c9ac4" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.750732 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.813880 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-5c2pr-config-p5bsc"] Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.846441 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-5c2pr-config-p5bsc"] Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.866158 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-twljx"] Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.877999 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-1a58-account-create-j6kfx"] Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.890053 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 15:55:10 crc kubenswrapper[4967]: E1121 15:55:10.890609 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" containerName="init-config-reloader" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.890625 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" containerName="init-config-reloader" Nov 21 15:55:10 crc kubenswrapper[4967]: E1121 15:55:10.890647 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e142d01-de55-4533-9173-25afe8e1b576" containerName="ovn-config" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.890653 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e142d01-de55-4533-9173-25afe8e1b576" containerName="ovn-config" Nov 21 15:55:10 crc kubenswrapper[4967]: E1121 15:55:10.890665 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" containerName="thanos-sidecar" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.890672 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" containerName="thanos-sidecar" Nov 21 15:55:10 crc kubenswrapper[4967]: E1121 
15:55:10.890682 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1974654-371e-49f8-b8d3-701e31f82b54" containerName="swift-ring-rebalance" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.890690 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1974654-371e-49f8-b8d3-701e31f82b54" containerName="swift-ring-rebalance" Nov 21 15:55:10 crc kubenswrapper[4967]: E1121 15:55:10.890700 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" containerName="config-reloader" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.890707 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" containerName="config-reloader" Nov 21 15:55:10 crc kubenswrapper[4967]: E1121 15:55:10.890723 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" containerName="prometheus" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.890729 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" containerName="prometheus" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.890906 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1974654-371e-49f8-b8d3-701e31f82b54" containerName="swift-ring-rebalance" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.890921 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" containerName="prometheus" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.890932 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e142d01-de55-4533-9173-25afe8e1b576" containerName="ovn-config" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.890945 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" containerName="thanos-sidecar" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.890956 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" containerName="config-reloader" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.893672 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.895707 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.896604 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-lsqp4" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.896864 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.897053 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.897201 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.897329 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.898073 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.903003 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.904679 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.905385 4967 scope.go:117] "RemoveContainer" containerID="3e971a9fb150a0db0857418622c0514aa345a0c015fa52a0c77ac69a937c3d2a" Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.920387 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-msr6l"] Nov 21 15:55:10 crc kubenswrapper[4967]: I1121 15:55:10.943257 4967 scope.go:117] "RemoveContainer" containerID="4d1c38784eb7fab979868a378c3864fcbd98b4b458a3d037be3227c959d81d16" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.028976 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.029064 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.029090 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nf2b8\" (UniqueName: \"kubernetes.io/projected/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-kube-api-access-nf2b8\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.029127 4967 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.029147 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.029436 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-config\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.029466 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.029502 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.029522 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.029588 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.029637 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.131791 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.132134 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.132292 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.132444 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.132492 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.132561 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.132592 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nf2b8\" (UniqueName: \"kubernetes.io/projected/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-kube-api-access-nf2b8\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.132641 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.132660 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " 
pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.132748 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-config\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.132775 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.135094 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.138595 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-r44s6"] Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.141909 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.143014 4967 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.144560 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/bfa69cc79b34a22cf414a992ebcad53d044f622cf1f4723e377bf86e9c5e1255/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.143048 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-config\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.143765 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.145173 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.148427 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.156878 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.161521 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.159908 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.167908 4967 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nf2b8\" (UniqueName: \"kubernetes.io/projected/9877cab6-ed78-4e94-83c9-b2a127e3b7b0-kube-api-access-nf2b8\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.175723 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-e255-account-create-t44dj"] Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.186181 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5922-account-create-sq5cw"] Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.202529 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-d7rdb"] Nov 21 15:55:11 crc kubenswrapper[4967]: W1121 15:55:11.219122 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9505ac95_12b0_426a_a2b5_42f13ec2fad8.slice/crio-958225ffa437e97d25bce5a55d835d7bc4ce4dbf177cb977d93e7f97e532c484 WatchSource:0}: Error finding container 958225ffa437e97d25bce5a55d835d7bc4ce4dbf177cb977d93e7f97e532c484: Status 404 returned error can't find the container with id 958225ffa437e97d25bce5a55d835d7bc4ce4dbf177cb977d93e7f97e532c484 Nov 21 15:55:11 crc kubenswrapper[4967]: W1121 15:55:11.230276 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6e85c34_bd76_47aa_b120_3410545d53f0.slice/crio-95d14f4a407650d2ccc15b38fe70d51fe7dd424d4280fe055714dd981e537407 WatchSource:0}: Error finding container 95d14f4a407650d2ccc15b38fe70d51fe7dd424d4280fe055714dd981e537407: Status 404 returned error can't find the container with id 95d14f4a407650d2ccc15b38fe70d51fe7dd424d4280fe055714dd981e537407 Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.257349 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-gkkl8"] Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.288906 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6f9cd499-a5be-4050-a347-d7b9097aa028\") pod \"prometheus-metric-storage-0\" (UID: \"9877cab6-ed78-4e94-83c9-b2a127e3b7b0\") " pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.530677 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.687521 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-msr6l" event={"ID":"a098c7dd-1c69-4c44-a9cb-65c81b00e9e1","Type":"ContainerStarted","Data":"36df21bc6423db1db74e4b92fc50f81ee7f70405d8d51b384f53ebefe73dbd1d"} Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.687847 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-msr6l" event={"ID":"a098c7dd-1c69-4c44-a9cb-65c81b00e9e1","Type":"ContainerStarted","Data":"5d09e3a1e4217d68427f7c2182f00760c1764a87d13f83db3355cb4d251a7821"} Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.721118 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-msr6l" podStartSLOduration=6.721063419 podStartE2EDuration="6.721063419s" podCreationTimestamp="2025-11-21 15:55:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:55:11.709788126 +0000 UTC m=+1199.968309134" watchObservedRunningTime="2025-11-21 15:55:11.721063419 +0000 UTC m=+1199.979584427" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.724754 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-e255-account-create-t44dj" event={"ID":"5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1","Type":"ContainerStarted","Data":"760965ddb3cf68f6983ffb2be89964db76717533fbde37691c818aae2f99164a"} Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.724786 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-e255-account-create-t44dj" event={"ID":"5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1","Type":"ContainerStarted","Data":"360966f604b98ef7d2aa9c223b1cd384a82ac31670762b5b11670fd134f40348"} Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.725982 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"58555aef-0397-4247-be17-7efcbbb36fca","Type":"ContainerStarted","Data":"ff0b6a272dd7f48b8b450f64f9827e7afab6ff6b2692e94d193e85aca03ca4e5"} Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.728738 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-r44s6" event={"ID":"ab58e136-893e-4b70-a0a3-d259b234dfcc","Type":"ContainerStarted","Data":"52ec31461f5f536998db68b1facaafdaf884171e3209f16130e681ab88af74ab"} Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.728771 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-r44s6" event={"ID":"ab58e136-893e-4b70-a0a3-d259b234dfcc","Type":"ContainerStarted","Data":"f7b9c26793aa645a0d910dff802970af2828be3e5dd892b3bac8fab045a8bdc1"} Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.734481 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-gkkl8" event={"ID":"0dab33d9-b2f0-4884-97ba-047b7772da9a","Type":"ContainerStarted","Data":"d15eae1bb51b8f4ddcc79a1ba6d08062debfb4d55217804a901d56436caeb989"} Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.749063 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-e255-account-create-t44dj" podStartSLOduration=6.749044181 podStartE2EDuration="6.749044181s" podCreationTimestamp="2025-11-21 15:55:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-21 15:55:11.742492103 +0000 UTC m=+1200.001013111" watchObservedRunningTime="2025-11-21 15:55:11.749044181 +0000 UTC m=+1200.007565179" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.753126 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1a58-account-create-j6kfx" event={"ID":"62996c4f-2e2b-4e94-b60d-1c2962cd7e3f","Type":"ContainerStarted","Data":"d56e59317bc94138f845ca502ab20844e78a940e21624aed790ce50c0adbfaea"} Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.753163 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1a58-account-create-j6kfx" event={"ID":"62996c4f-2e2b-4e94-b60d-1c2962cd7e3f","Type":"ContainerStarted","Data":"57c0c7f1b21396f700016c15a75d5618eaca45f01ed9108ee6c5618d2de35411"} Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.770831 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-d7rdb" event={"ID":"b6e85c34-bd76-47aa-b120-3410545d53f0","Type":"ContainerStarted","Data":"94563cc8df986eb98b7cbee5998d256323f19ea02b9887a26782a03fef987cd2"} Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.770866 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-d7rdb" event={"ID":"b6e85c34-bd76-47aa-b120-3410545d53f0","Type":"ContainerStarted","Data":"95d14f4a407650d2ccc15b38fe70d51fe7dd424d4280fe055714dd981e537407"} Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.771713 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-r44s6" podStartSLOduration=6.7716988 podStartE2EDuration="6.7716988s" podCreationTimestamp="2025-11-21 15:55:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:55:11.76995086 +0000 UTC m=+1200.028471868" watchObservedRunningTime="2025-11-21 15:55:11.7716988 +0000 UTC m=+1200.030219808" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.795548 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-twljx" event={"ID":"9c5930c6-1d89-4ef4-bd96-f290177d2aff","Type":"ContainerStarted","Data":"eacd3d8499a4eb3cd0475fa343a858525c67ec738545e353f5d4a3146f204012"} Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.795592 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-twljx" event={"ID":"9c5930c6-1d89-4ef4-bd96-f290177d2aff","Type":"ContainerStarted","Data":"467deee6c3a504c155977612b5b9b9310742a02ac2546bed1bf99f1c4d41cab8"} Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.811189 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5922-account-create-sq5cw" event={"ID":"9505ac95-12b0-426a-a2b5-42f13ec2fad8","Type":"ContainerStarted","Data":"05e9a90e42b8160efd190cfe43fc2add63e216db73cb1ba55743cac7188cb0f4"} Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.811452 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5922-account-create-sq5cw" event={"ID":"9505ac95-12b0-426a-a2b5-42f13ec2fad8","Type":"ContainerStarted","Data":"958225ffa437e97d25bce5a55d835d7bc4ce4dbf177cb977d93e7f97e532c484"} Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.818968 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-9fbft" event={"ID":"fc07e264-27b3-4f82-b96e-04ef32de4c2c","Type":"ContainerStarted","Data":"2d2364867360eb071358a2df33122fcfac9db14bb17e08adcd21243d636a2259"} Nov 21 
15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.823173 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-d7rdb" podStartSLOduration=6.823156845 podStartE2EDuration="6.823156845s" podCreationTimestamp="2025-11-21 15:55:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:55:11.815463025 +0000 UTC m=+1200.073984043" watchObservedRunningTime="2025-11-21 15:55:11.823156845 +0000 UTC m=+1200.081677853" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.842388 4967 generic.go:334] "Generic (PLEG): container finished" podID="3813cbde-1076-466c-b72a-94ffa3741ef1" containerID="ea9e83c624b7de77e3be2211d980926aba17815ae39534984390b145bae746a4" exitCode=0 Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.842799 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-fca9-account-create-2qmtv" event={"ID":"3813cbde-1076-466c-b72a-94ffa3741ef1","Type":"ContainerDied","Data":"ea9e83c624b7de77e3be2211d980926aba17815ae39534984390b145bae746a4"} Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.855716 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5922-account-create-sq5cw" podStartSLOduration=6.855699808 podStartE2EDuration="6.855699808s" podCreationTimestamp="2025-11-21 15:55:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:55:11.85227809 +0000 UTC m=+1200.110799098" watchObservedRunningTime="2025-11-21 15:55:11.855699808 +0000 UTC m=+1200.114220816" Nov 21 15:55:11 crc kubenswrapper[4967]: I1121 15:55:11.912998 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-9fbft" podStartSLOduration=4.047182885 podStartE2EDuration="18.91298046s" podCreationTimestamp="2025-11-21 15:54:53 +0000 UTC" firstStartedPulling="2025-11-21 15:54:54.734796515 +0000 UTC m=+1182.993317513" lastFinishedPulling="2025-11-21 15:55:09.60059408 +0000 UTC m=+1197.859115088" observedRunningTime="2025-11-21 15:55:11.903995802 +0000 UTC m=+1200.162516820" watchObservedRunningTime="2025-11-21 15:55:11.91298046 +0000 UTC m=+1200.171501468" Nov 21 15:55:12 crc kubenswrapper[4967]: I1121 15:55:12.085093 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 21 15:55:12 crc kubenswrapper[4967]: I1121 15:55:12.551090 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1e142d01-de55-4533-9173-25afe8e1b576" path="/var/lib/kubelet/pods/1e142d01-de55-4533-9173-25afe8e1b576/volumes" Nov 21 15:55:12 crc kubenswrapper[4967]: I1121 15:55:12.552409 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d44bd64f-3c97-4cd8-be5e-2cabe45480a0" path="/var/lib/kubelet/pods/d44bd64f-3c97-4cd8-be5e-2cabe45480a0/volumes" Nov 21 15:55:12 crc kubenswrapper[4967]: I1121 15:55:12.860785 4967 generic.go:334] "Generic (PLEG): container finished" podID="ab58e136-893e-4b70-a0a3-d259b234dfcc" containerID="52ec31461f5f536998db68b1facaafdaf884171e3209f16130e681ab88af74ab" exitCode=0 Nov 21 15:55:12 crc kubenswrapper[4967]: I1121 15:55:12.860827 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-r44s6" event={"ID":"ab58e136-893e-4b70-a0a3-d259b234dfcc","Type":"ContainerDied","Data":"52ec31461f5f536998db68b1facaafdaf884171e3209f16130e681ab88af74ab"} Nov 21 
Nov 21 15:55:12 crc kubenswrapper[4967]: I1121 15:55:12.863359 4967 generic.go:334] "Generic (PLEG): container finished" podID="a098c7dd-1c69-4c44-a9cb-65c81b00e9e1" containerID="36df21bc6423db1db74e4b92fc50f81ee7f70405d8d51b384f53ebefe73dbd1d" exitCode=0
Nov 21 15:55:12 crc kubenswrapper[4967]: I1121 15:55:12.863412 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-msr6l" event={"ID":"a098c7dd-1c69-4c44-a9cb-65c81b00e9e1","Type":"ContainerDied","Data":"36df21bc6423db1db74e4b92fc50f81ee7f70405d8d51b384f53ebefe73dbd1d"}
Nov 21 15:55:12 crc kubenswrapper[4967]: I1121 15:55:12.866088 4967 generic.go:334] "Generic (PLEG): container finished" podID="62996c4f-2e2b-4e94-b60d-1c2962cd7e3f" containerID="d56e59317bc94138f845ca502ab20844e78a940e21624aed790ce50c0adbfaea" exitCode=0
Nov 21 15:55:12 crc kubenswrapper[4967]: I1121 15:55:12.866136 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1a58-account-create-j6kfx" event={"ID":"62996c4f-2e2b-4e94-b60d-1c2962cd7e3f","Type":"ContainerDied","Data":"d56e59317bc94138f845ca502ab20844e78a940e21624aed790ce50c0adbfaea"}
Nov 21 15:55:12 crc kubenswrapper[4967]: I1121 15:55:12.869037 4967 generic.go:334] "Generic (PLEG): container finished" podID="b6e85c34-bd76-47aa-b120-3410545d53f0" containerID="94563cc8df986eb98b7cbee5998d256323f19ea02b9887a26782a03fef987cd2" exitCode=0
Nov 21 15:55:12 crc kubenswrapper[4967]: I1121 15:55:12.869250 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-d7rdb" event={"ID":"b6e85c34-bd76-47aa-b120-3410545d53f0","Type":"ContainerDied","Data":"94563cc8df986eb98b7cbee5998d256323f19ea02b9887a26782a03fef987cd2"}
Nov 21 15:55:12 crc kubenswrapper[4967]: I1121 15:55:12.873622 4967 generic.go:334] "Generic (PLEG): container finished" podID="9c5930c6-1d89-4ef4-bd96-f290177d2aff" containerID="eacd3d8499a4eb3cd0475fa343a858525c67ec738545e353f5d4a3146f204012" exitCode=0
Nov 21 15:55:12 crc kubenswrapper[4967]: I1121 15:55:12.873709 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-twljx" event={"ID":"9c5930c6-1d89-4ef4-bd96-f290177d2aff","Type":"ContainerDied","Data":"eacd3d8499a4eb3cd0475fa343a858525c67ec738545e353f5d4a3146f204012"}
Nov 21 15:55:12 crc kubenswrapper[4967]: I1121 15:55:12.882234 4967 generic.go:334] "Generic (PLEG): container finished" podID="9505ac95-12b0-426a-a2b5-42f13ec2fad8" containerID="05e9a90e42b8160efd190cfe43fc2add63e216db73cb1ba55743cac7188cb0f4" exitCode=0
Nov 21 15:55:12 crc kubenswrapper[4967]: I1121 15:55:12.882335 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5922-account-create-sq5cw" event={"ID":"9505ac95-12b0-426a-a2b5-42f13ec2fad8","Type":"ContainerDied","Data":"05e9a90e42b8160efd190cfe43fc2add63e216db73cb1ba55743cac7188cb0f4"}
Nov 21 15:55:12 crc kubenswrapper[4967]: I1121 15:55:12.884438 4967 generic.go:334] "Generic (PLEG): container finished" podID="5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1" containerID="760965ddb3cf68f6983ffb2be89964db76717533fbde37691c818aae2f99164a" exitCode=0
Nov 21 15:55:12 crc kubenswrapper[4967]: I1121 15:55:12.884597 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-e255-account-create-t44dj" event={"ID":"5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1","Type":"ContainerDied","Data":"760965ddb3cf68f6983ffb2be89964db76717533fbde37691c818aae2f99164a"}
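Every completed job container in this window produces the same pair: a "Generic (PLEG): container finished ... exitCode=0" line and a matching ContainerDied event carrying the identical 64-hex container ID. A small sketch for extracting those fields from a line with Go's regexp package; the pattern is a best-effort assumption fitted to the formatting visible here:

```go
// Pull pod, pod UID, event type and container ID out of a
// "SyncLoop (PLEG)" log line like the ones above.
package main

import (
	"fmt"
	"regexp"
)

var pleg = regexp.MustCompile(`pod="([^"]+)" event=\{"ID":"([^"]+)","Type":"(ContainerStarted|ContainerDied)","Data":"([0-9a-f]{64})"\}`)

func main() {
	line := `I1121 15:55:12.863412 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-msr6l" event={"ID":"a098c7dd-1c69-4c44-a9cb-65c81b00e9e1","Type":"ContainerDied","Data":"36df21bc6423db1db74e4b92fc50f81ee7f70405d8d51b384f53ebefe73dbd1d"}`
	if m := pleg.FindStringSubmatch(line); m != nil {
		fmt.Printf("pod=%s uid=%s type=%s container=%s\n", m[1], m[2], m[3], m[4])
	}
}
```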
Need to start a new one" pod="openstack/barbican-1a58-account-create-j6kfx" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.510177 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ssxfl\" (UniqueName: \"kubernetes.io/projected/62996c4f-2e2b-4e94-b60d-1c2962cd7e3f-kube-api-access-ssxfl\") pod \"62996c4f-2e2b-4e94-b60d-1c2962cd7e3f\" (UID: \"62996c4f-2e2b-4e94-b60d-1c2962cd7e3f\") " Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.510268 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62996c4f-2e2b-4e94-b60d-1c2962cd7e3f-operator-scripts\") pod \"62996c4f-2e2b-4e94-b60d-1c2962cd7e3f\" (UID: \"62996c4f-2e2b-4e94-b60d-1c2962cd7e3f\") " Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.511647 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62996c4f-2e2b-4e94-b60d-1c2962cd7e3f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "62996c4f-2e2b-4e94-b60d-1c2962cd7e3f" (UID: "62996c4f-2e2b-4e94-b60d-1c2962cd7e3f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.517205 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62996c4f-2e2b-4e94-b60d-1c2962cd7e3f-kube-api-access-ssxfl" (OuterVolumeSpecName: "kube-api-access-ssxfl") pod "62996c4f-2e2b-4e94-b60d-1c2962cd7e3f" (UID: "62996c4f-2e2b-4e94-b60d-1c2962cd7e3f"). InnerVolumeSpecName "kube-api-access-ssxfl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.612642 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ssxfl\" (UniqueName: \"kubernetes.io/projected/62996c4f-2e2b-4e94-b60d-1c2962cd7e3f-kube-api-access-ssxfl\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.612674 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62996c4f-2e2b-4e94-b60d-1c2962cd7e3f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.634596 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-fca9-account-create-2qmtv" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.641425 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-create-twljx" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.714327 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gq8jm\" (UniqueName: \"kubernetes.io/projected/3813cbde-1076-466c-b72a-94ffa3741ef1-kube-api-access-gq8jm\") pod \"3813cbde-1076-466c-b72a-94ffa3741ef1\" (UID: \"3813cbde-1076-466c-b72a-94ffa3741ef1\") " Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.714583 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rqtcx\" (UniqueName: \"kubernetes.io/projected/9c5930c6-1d89-4ef4-bd96-f290177d2aff-kube-api-access-rqtcx\") pod \"9c5930c6-1d89-4ef4-bd96-f290177d2aff\" (UID: \"9c5930c6-1d89-4ef4-bd96-f290177d2aff\") " Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.714617 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3813cbde-1076-466c-b72a-94ffa3741ef1-operator-scripts\") pod \"3813cbde-1076-466c-b72a-94ffa3741ef1\" (UID: \"3813cbde-1076-466c-b72a-94ffa3741ef1\") " Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.714652 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c5930c6-1d89-4ef4-bd96-f290177d2aff-operator-scripts\") pod \"9c5930c6-1d89-4ef4-bd96-f290177d2aff\" (UID: \"9c5930c6-1d89-4ef4-bd96-f290177d2aff\") " Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.715378 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3813cbde-1076-466c-b72a-94ffa3741ef1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3813cbde-1076-466c-b72a-94ffa3741ef1" (UID: "3813cbde-1076-466c-b72a-94ffa3741ef1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.715463 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c5930c6-1d89-4ef4-bd96-f290177d2aff-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9c5930c6-1d89-4ef4-bd96-f290177d2aff" (UID: "9c5930c6-1d89-4ef4-bd96-f290177d2aff"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.718745 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c5930c6-1d89-4ef4-bd96-f290177d2aff-kube-api-access-rqtcx" (OuterVolumeSpecName: "kube-api-access-rqtcx") pod "9c5930c6-1d89-4ef4-bd96-f290177d2aff" (UID: "9c5930c6-1d89-4ef4-bd96-f290177d2aff"). InnerVolumeSpecName "kube-api-access-rqtcx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.721253 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3813cbde-1076-466c-b72a-94ffa3741ef1-kube-api-access-gq8jm" (OuterVolumeSpecName: "kube-api-access-gq8jm") pod "3813cbde-1076-466c-b72a-94ffa3741ef1" (UID: "3813cbde-1076-466c-b72a-94ffa3741ef1"). InnerVolumeSpecName "kube-api-access-gq8jm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.816755 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gq8jm\" (UniqueName: \"kubernetes.io/projected/3813cbde-1076-466c-b72a-94ffa3741ef1-kube-api-access-gq8jm\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.816791 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rqtcx\" (UniqueName: \"kubernetes.io/projected/9c5930c6-1d89-4ef4-bd96-f290177d2aff-kube-api-access-rqtcx\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.816802 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3813cbde-1076-466c-b72a-94ffa3741ef1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.816812 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c5930c6-1d89-4ef4-bd96-f290177d2aff-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.894337 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-twljx" event={"ID":"9c5930c6-1d89-4ef4-bd96-f290177d2aff","Type":"ContainerDied","Data":"467deee6c3a504c155977612b5b9b9310742a02ac2546bed1bf99f1c4d41cab8"} Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.894374 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="467deee6c3a504c155977612b5b9b9310742a02ac2546bed1bf99f1c4d41cab8" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.894358 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-twljx" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.896478 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-1a58-account-create-j6kfx" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.896527 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-1a58-account-create-j6kfx" event={"ID":"62996c4f-2e2b-4e94-b60d-1c2962cd7e3f","Type":"ContainerDied","Data":"57c0c7f1b21396f700016c15a75d5618eaca45f01ed9108ee6c5618d2de35411"} Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.896572 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="57c0c7f1b21396f700016c15a75d5618eaca45f01ed9108ee6c5618d2de35411" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.898385 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-fca9-account-create-2qmtv" event={"ID":"3813cbde-1076-466c-b72a-94ffa3741ef1","Type":"ContainerDied","Data":"5d8c5d4c5ba36dbec17ff7a1568ffc52131a93b45b9f132f158cf0cc67bcdfef"} Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.898419 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5d8c5d4c5ba36dbec17ff7a1568ffc52131a93b45b9f132f158cf0cc67bcdfef" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.898487 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-fca9-account-create-2qmtv" Nov 21 15:55:13 crc kubenswrapper[4967]: I1121 15:55:13.900126 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9877cab6-ed78-4e94-83c9-b2a127e3b7b0","Type":"ContainerStarted","Data":"e984d35f847244e13aa9c2af69d54e06477da22786a7175682e2d2cc3ba8bf89"} Nov 21 15:55:14 crc kubenswrapper[4967]: I1121 15:55:14.337511 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0" Nov 21 15:55:14 crc kubenswrapper[4967]: I1121 15:55:14.348604 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4bf5cb6a-c8f8-43c3-b546-282bfd3244e2-etc-swift\") pod \"swift-storage-0\" (UID: \"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2\") " pod="openstack/swift-storage-0" Nov 21 15:55:14 crc kubenswrapper[4967]: I1121 15:55:14.464922 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 21 15:55:16 crc kubenswrapper[4967]: I1121 15:55:16.522231 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:55:16 crc kubenswrapper[4967]: I1121 15:55:16.522297 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:55:16 crc kubenswrapper[4967]: I1121 15:55:16.522369 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 15:55:16 crc kubenswrapper[4967]: I1121 15:55:16.523059 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d0c41fa7ce71cf310016f53428786fb104a174849e89edc64a61d157cdf085ba"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 15:55:16 crc kubenswrapper[4967]: I1121 15:55:16.523117 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://d0c41fa7ce71cf310016f53428786fb104a174849e89edc64a61d157cdf085ba" gracePeriod=600 Nov 21 15:55:16 crc kubenswrapper[4967]: I1121 15:55:16.947575 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="d0c41fa7ce71cf310016f53428786fb104a174849e89edc64a61d157cdf085ba" exitCode=0 Nov 21 15:55:16 crc kubenswrapper[4967]: I1121 15:55:16.947662 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" 
event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"d0c41fa7ce71cf310016f53428786fb104a174849e89edc64a61d157cdf085ba"} Nov 21 15:55:16 crc kubenswrapper[4967]: I1121 15:55:16.947731 4967 scope.go:117] "RemoveContainer" containerID="1d65d5683a0677cbfe6b7ae1700b31a63febdef0c20d9c6546a7663875980c24" Nov 21 15:55:16 crc kubenswrapper[4967]: I1121 15:55:16.950035 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9877cab6-ed78-4e94-83c9-b2a127e3b7b0","Type":"ContainerStarted","Data":"70bce5db24096bfe071da375da3a4ed4c55a159b4c706725d3609e7ec205996f"} Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.201531 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-r44s6" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.213055 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-d7rdb" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.259813 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-msr6l" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.306238 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5922-account-create-sq5cw" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.310150 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab58e136-893e-4b70-a0a3-d259b234dfcc-operator-scripts\") pod \"ab58e136-893e-4b70-a0a3-d259b234dfcc\" (UID: \"ab58e136-893e-4b70-a0a3-d259b234dfcc\") " Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.311318 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j868b\" (UniqueName: \"kubernetes.io/projected/a098c7dd-1c69-4c44-a9cb-65c81b00e9e1-kube-api-access-j868b\") pod \"a098c7dd-1c69-4c44-a9cb-65c81b00e9e1\" (UID: \"a098c7dd-1c69-4c44-a9cb-65c81b00e9e1\") " Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.311377 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9505ac95-12b0-426a-a2b5-42f13ec2fad8-operator-scripts\") pod \"9505ac95-12b0-426a-a2b5-42f13ec2fad8\" (UID: \"9505ac95-12b0-426a-a2b5-42f13ec2fad8\") " Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.311420 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-flpg8\" (UniqueName: \"kubernetes.io/projected/ab58e136-893e-4b70-a0a3-d259b234dfcc-kube-api-access-flpg8\") pod \"ab58e136-893e-4b70-a0a3-d259b234dfcc\" (UID: \"ab58e136-893e-4b70-a0a3-d259b234dfcc\") " Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.315526 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9505ac95-12b0-426a-a2b5-42f13ec2fad8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9505ac95-12b0-426a-a2b5-42f13ec2fad8" (UID: "9505ac95-12b0-426a-a2b5-42f13ec2fad8"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.316339 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab58e136-893e-4b70-a0a3-d259b234dfcc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ab58e136-893e-4b70-a0a3-d259b234dfcc" (UID: "ab58e136-893e-4b70-a0a3-d259b234dfcc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.333126 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab58e136-893e-4b70-a0a3-d259b234dfcc-kube-api-access-flpg8" (OuterVolumeSpecName: "kube-api-access-flpg8") pod "ab58e136-893e-4b70-a0a3-d259b234dfcc" (UID: "ab58e136-893e-4b70-a0a3-d259b234dfcc"). InnerVolumeSpecName "kube-api-access-flpg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.363381 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-e255-account-create-t44dj" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.366630 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a098c7dd-1c69-4c44-a9cb-65c81b00e9e1-kube-api-access-j868b" (OuterVolumeSpecName: "kube-api-access-j868b") pod "a098c7dd-1c69-4c44-a9cb-65c81b00e9e1" (UID: "a098c7dd-1c69-4c44-a9cb-65c81b00e9e1"). InnerVolumeSpecName "kube-api-access-j868b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.417782 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a098c7dd-1c69-4c44-a9cb-65c81b00e9e1-operator-scripts\") pod \"a098c7dd-1c69-4c44-a9cb-65c81b00e9e1\" (UID: \"a098c7dd-1c69-4c44-a9cb-65c81b00e9e1\") " Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.418165 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6e85c34-bd76-47aa-b120-3410545d53f0-operator-scripts\") pod \"b6e85c34-bd76-47aa-b120-3410545d53f0\" (UID: \"b6e85c34-bd76-47aa-b120-3410545d53f0\") " Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.418202 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bcgm7\" (UniqueName: \"kubernetes.io/projected/b6e85c34-bd76-47aa-b120-3410545d53f0-kube-api-access-bcgm7\") pod \"b6e85c34-bd76-47aa-b120-3410545d53f0\" (UID: \"b6e85c34-bd76-47aa-b120-3410545d53f0\") " Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.419171 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w54mt\" (UniqueName: \"kubernetes.io/projected/9505ac95-12b0-426a-a2b5-42f13ec2fad8-kube-api-access-w54mt\") pod \"9505ac95-12b0-426a-a2b5-42f13ec2fad8\" (UID: \"9505ac95-12b0-426a-a2b5-42f13ec2fad8\") " Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.419837 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6e85c34-bd76-47aa-b120-3410545d53f0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b6e85c34-bd76-47aa-b120-3410545d53f0" (UID: "b6e85c34-bd76-47aa-b120-3410545d53f0"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.420859 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a098c7dd-1c69-4c44-a9cb-65c81b00e9e1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a098c7dd-1c69-4c44-a9cb-65c81b00e9e1" (UID: "a098c7dd-1c69-4c44-a9cb-65c81b00e9e1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.421535 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a098c7dd-1c69-4c44-a9cb-65c81b00e9e1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.421640 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6e85c34-bd76-47aa-b120-3410545d53f0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.421726 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab58e136-893e-4b70-a0a3-d259b234dfcc-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.421793 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j868b\" (UniqueName: \"kubernetes.io/projected/a098c7dd-1c69-4c44-a9cb-65c81b00e9e1-kube-api-access-j868b\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.421859 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9505ac95-12b0-426a-a2b5-42f13ec2fad8-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.421927 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-flpg8\" (UniqueName: \"kubernetes.io/projected/ab58e136-893e-4b70-a0a3-d259b234dfcc-kube-api-access-flpg8\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.426179 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6e85c34-bd76-47aa-b120-3410545d53f0-kube-api-access-bcgm7" (OuterVolumeSpecName: "kube-api-access-bcgm7") pod "b6e85c34-bd76-47aa-b120-3410545d53f0" (UID: "b6e85c34-bd76-47aa-b120-3410545d53f0"). InnerVolumeSpecName "kube-api-access-bcgm7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.427487 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9505ac95-12b0-426a-a2b5-42f13ec2fad8-kube-api-access-w54mt" (OuterVolumeSpecName: "kube-api-access-w54mt") pod "9505ac95-12b0-426a-a2b5-42f13ec2fad8" (UID: "9505ac95-12b0-426a-a2b5-42f13ec2fad8"). InnerVolumeSpecName "kube-api-access-w54mt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.523420 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-64czj\" (UniqueName: \"kubernetes.io/projected/5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1-kube-api-access-64czj\") pod \"5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1\" (UID: \"5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1\") " Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.523503 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1-operator-scripts\") pod \"5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1\" (UID: \"5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1\") " Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.524073 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1" (UID: "5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.524178 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bcgm7\" (UniqueName: \"kubernetes.io/projected/b6e85c34-bd76-47aa-b120-3410545d53f0-kube-api-access-bcgm7\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.524197 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w54mt\" (UniqueName: \"kubernetes.io/projected/9505ac95-12b0-426a-a2b5-42f13ec2fad8-kube-api-access-w54mt\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.527495 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1-kube-api-access-64czj" (OuterVolumeSpecName: "kube-api-access-64czj") pod "5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1" (UID: "5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1"). InnerVolumeSpecName "kube-api-access-64czj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.626755 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.626797 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-64czj\" (UniqueName: \"kubernetes.io/projected/5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1-kube-api-access-64czj\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.805482 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.964403 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-d7rdb" event={"ID":"b6e85c34-bd76-47aa-b120-3410545d53f0","Type":"ContainerDied","Data":"95d14f4a407650d2ccc15b38fe70d51fe7dd424d4280fe055714dd981e537407"} Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.964742 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="95d14f4a407650d2ccc15b38fe70d51fe7dd424d4280fe055714dd981e537407" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.964430 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-d7rdb" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.975298 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"f09ca3cd3c7764210d3de0eccc7f7854f17def33e58cd06023ecb248dfe5b054"} Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.978739 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"58555aef-0397-4247-be17-7efcbbb36fca","Type":"ContainerStarted","Data":"283b83a10d41bb506e7248dfdaf806cfc0c86d4c413d026a02a55337e15abd16"} Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.980642 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2","Type":"ContainerStarted","Data":"9f7031a184440e4a6382ed54ffdd05143bfef3cde994aec6861846924f8c8a1a"} Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.982544 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-msr6l" event={"ID":"a098c7dd-1c69-4c44-a9cb-65c81b00e9e1","Type":"ContainerDied","Data":"5d09e3a1e4217d68427f7c2182f00760c1764a87d13f83db3355cb4d251a7821"} Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.982593 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5d09e3a1e4217d68427f7c2182f00760c1764a87d13f83db3355cb4d251a7821" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.982589 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-msr6l" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.986278 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-gkkl8" event={"ID":"0dab33d9-b2f0-4884-97ba-047b7772da9a","Type":"ContainerStarted","Data":"089119f5d74217fbf2b924d4b97a4f23c8fb5cd7d1e20ba685fa6d34e87cadc3"} Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.987896 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-e255-account-create-t44dj" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.988114 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-e255-account-create-t44dj" event={"ID":"5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1","Type":"ContainerDied","Data":"360966f604b98ef7d2aa9c223b1cd384a82ac31670762b5b11670fd134f40348"} Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.988153 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="360966f604b98ef7d2aa9c223b1cd384a82ac31670762b5b11670fd134f40348" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.990037 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5922-account-create-sq5cw" event={"ID":"9505ac95-12b0-426a-a2b5-42f13ec2fad8","Type":"ContainerDied","Data":"958225ffa437e97d25bce5a55d835d7bc4ce4dbf177cb977d93e7f97e532c484"} Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.990070 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="958225ffa437e97d25bce5a55d835d7bc4ce4dbf177cb977d93e7f97e532c484" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.990143 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5922-account-create-sq5cw" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.991824 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-r44s6" Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.998014 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-r44s6" event={"ID":"ab58e136-893e-4b70-a0a3-d259b234dfcc","Type":"ContainerDied","Data":"f7b9c26793aa645a0d910dff802970af2828be3e5dd892b3bac8fab045a8bdc1"} Nov 21 15:55:17 crc kubenswrapper[4967]: I1121 15:55:17.998072 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7b9c26793aa645a0d910dff802970af2828be3e5dd892b3bac8fab045a8bdc1" Nov 21 15:55:18 crc kubenswrapper[4967]: I1121 15:55:18.024769 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-gkkl8" podStartSLOduration=6.732813895 podStartE2EDuration="13.024750099s" podCreationTimestamp="2025-11-21 15:55:05 +0000 UTC" firstStartedPulling="2025-11-21 15:55:11.304214901 +0000 UTC m=+1199.562735909" lastFinishedPulling="2025-11-21 15:55:17.596151105 +0000 UTC m=+1205.854672113" observedRunningTime="2025-11-21 15:55:18.01882666 +0000 UTC m=+1206.277347668" watchObservedRunningTime="2025-11-21 15:55:18.024750099 +0000 UTC m=+1206.283271107" Nov 21 15:55:18 crc kubenswrapper[4967]: I1121 15:55:18.050971 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=13.837288328 podStartE2EDuration="17.05095241s" podCreationTimestamp="2025-11-21 15:55:01 +0000 UTC" firstStartedPulling="2025-11-21 15:55:10.871749485 +0000 UTC m=+1199.130270493" lastFinishedPulling="2025-11-21 15:55:14.085413567 +0000 UTC m=+1202.343934575" observedRunningTime="2025-11-21 15:55:18.034112838 +0000 UTC m=+1206.292633866" watchObservedRunningTime="2025-11-21 15:55:18.05095241 +0000 UTC m=+1206.309473418" Nov 21 15:55:22 crc kubenswrapper[4967]: I1121 15:55:22.031876 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2","Type":"ContainerStarted","Data":"e9c001769945479fc0bb8be14ee7338f9592c345c3e510454fd2a1a2d587a351"} Nov 21 15:55:23 crc kubenswrapper[4967]: I1121 15:55:23.043326 4967 generic.go:334] "Generic (PLEG): container finished" podID="9877cab6-ed78-4e94-83c9-b2a127e3b7b0" containerID="70bce5db24096bfe071da375da3a4ed4c55a159b4c706725d3609e7ec205996f" exitCode=0 Nov 21 15:55:23 crc kubenswrapper[4967]: I1121 15:55:23.043491 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9877cab6-ed78-4e94-83c9-b2a127e3b7b0","Type":"ContainerDied","Data":"70bce5db24096bfe071da375da3a4ed4c55a159b4c706725d3609e7ec205996f"} Nov 21 15:55:23 crc kubenswrapper[4967]: I1121 15:55:23.048054 4967 generic.go:334] "Generic (PLEG): container finished" podID="0dab33d9-b2f0-4884-97ba-047b7772da9a" containerID="089119f5d74217fbf2b924d4b97a4f23c8fb5cd7d1e20ba685fa6d34e87cadc3" exitCode=0 Nov 21 15:55:23 crc kubenswrapper[4967]: I1121 15:55:23.048089 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-gkkl8" event={"ID":"0dab33d9-b2f0-4884-97ba-047b7772da9a","Type":"ContainerDied","Data":"089119f5d74217fbf2b924d4b97a4f23c8fb5cd7d1e20ba685fa6d34e87cadc3"} Nov 21 15:55:23 crc kubenswrapper[4967]: I1121 15:55:23.054059 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2","Type":"ContainerStarted","Data":"74fadc0d4ac3445e70336d758930532e97cba07c142b1f9d4fd3944aa85cb413"} Nov 21 15:55:23 crc kubenswrapper[4967]: I1121 15:55:23.054098 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2","Type":"ContainerStarted","Data":"5d222089fc0ef0ee604cfe8e9b109dbaa9c54a912dc15050d07bd712180741d7"} Nov 21 15:55:23 crc kubenswrapper[4967]: I1121 15:55:23.054108 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2","Type":"ContainerStarted","Data":"e89a7eee3f8371a25098e416b7f22ffa068ed26bd8dde7ec2b9436a09a87e816"} Nov 21 15:55:24 crc kubenswrapper[4967]: I1121 15:55:24.082426 4967 generic.go:334] "Generic (PLEG): container finished" podID="fc07e264-27b3-4f82-b96e-04ef32de4c2c" containerID="2d2364867360eb071358a2df33122fcfac9db14bb17e08adcd21243d636a2259" exitCode=0 Nov 21 15:55:24 crc kubenswrapper[4967]: I1121 15:55:24.083507 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-9fbft" event={"ID":"fc07e264-27b3-4f82-b96e-04ef32de4c2c","Type":"ContainerDied","Data":"2d2364867360eb071358a2df33122fcfac9db14bb17e08adcd21243d636a2259"} Nov 21 15:55:24 crc kubenswrapper[4967]: I1121 15:55:24.090977 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2","Type":"ContainerStarted","Data":"051c6dee780ae256d511aa8d2e6f9527805f378d1cb42607c051176c21b30cdc"} Nov 21 15:55:24 crc kubenswrapper[4967]: I1121 15:55:24.091026 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2","Type":"ContainerStarted","Data":"92577a3809b1fc3dba76550079ff9641d4fe76c25793413c9a71aaf65360e205"} Nov 21 15:55:24 crc kubenswrapper[4967]: I1121 15:55:24.093893 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" 
event={"ID":"9877cab6-ed78-4e94-83c9-b2a127e3b7b0","Type":"ContainerStarted","Data":"d3ac13bac54e18b0419ab715047b8212909f7bb34b2cea8124d8c14567d16730"} Nov 21 15:55:24 crc kubenswrapper[4967]: I1121 15:55:24.539799 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-gkkl8" Nov 21 15:55:24 crc kubenswrapper[4967]: I1121 15:55:24.694396 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dab33d9-b2f0-4884-97ba-047b7772da9a-config-data\") pod \"0dab33d9-b2f0-4884-97ba-047b7772da9a\" (UID: \"0dab33d9-b2f0-4884-97ba-047b7772da9a\") " Nov 21 15:55:24 crc kubenswrapper[4967]: I1121 15:55:24.694803 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dab33d9-b2f0-4884-97ba-047b7772da9a-combined-ca-bundle\") pod \"0dab33d9-b2f0-4884-97ba-047b7772da9a\" (UID: \"0dab33d9-b2f0-4884-97ba-047b7772da9a\") " Nov 21 15:55:24 crc kubenswrapper[4967]: I1121 15:55:24.694847 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4dkd6\" (UniqueName: \"kubernetes.io/projected/0dab33d9-b2f0-4884-97ba-047b7772da9a-kube-api-access-4dkd6\") pod \"0dab33d9-b2f0-4884-97ba-047b7772da9a\" (UID: \"0dab33d9-b2f0-4884-97ba-047b7772da9a\") " Nov 21 15:55:24 crc kubenswrapper[4967]: I1121 15:55:24.707821 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0dab33d9-b2f0-4884-97ba-047b7772da9a-kube-api-access-4dkd6" (OuterVolumeSpecName: "kube-api-access-4dkd6") pod "0dab33d9-b2f0-4884-97ba-047b7772da9a" (UID: "0dab33d9-b2f0-4884-97ba-047b7772da9a"). InnerVolumeSpecName "kube-api-access-4dkd6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:55:24 crc kubenswrapper[4967]: I1121 15:55:24.723467 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0dab33d9-b2f0-4884-97ba-047b7772da9a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0dab33d9-b2f0-4884-97ba-047b7772da9a" (UID: "0dab33d9-b2f0-4884-97ba-047b7772da9a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:55:24 crc kubenswrapper[4967]: I1121 15:55:24.742655 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0dab33d9-b2f0-4884-97ba-047b7772da9a-config-data" (OuterVolumeSpecName: "config-data") pod "0dab33d9-b2f0-4884-97ba-047b7772da9a" (UID: "0dab33d9-b2f0-4884-97ba-047b7772da9a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:55:24 crc kubenswrapper[4967]: I1121 15:55:24.797986 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0dab33d9-b2f0-4884-97ba-047b7772da9a-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:24 crc kubenswrapper[4967]: I1121 15:55:24.798029 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dab33d9-b2f0-4884-97ba-047b7772da9a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:24 crc kubenswrapper[4967]: I1121 15:55:24.798044 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4dkd6\" (UniqueName: \"kubernetes.io/projected/0dab33d9-b2f0-4884-97ba-047b7772da9a-kube-api-access-4dkd6\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.113226 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2","Type":"ContainerStarted","Data":"28d1da8ed25e7cfd6062b6bce5f7b4218465824e0793fe2990c29738e4773da8"} Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.113283 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2","Type":"ContainerStarted","Data":"76278a7ba8c63792405b03d5597b161812fa06efe67cc2524a4100d679906dd4"} Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.123437 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-gkkl8" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.123810 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-gkkl8" event={"ID":"0dab33d9-b2f0-4884-97ba-047b7772da9a","Type":"ContainerDied","Data":"d15eae1bb51b8f4ddcc79a1ba6d08062debfb4d55217804a901d56436caeb989"} Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.123879 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d15eae1bb51b8f4ddcc79a1ba6d08062debfb4d55217804a901d56436caeb989" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.339063 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.369928 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c9d85d47c-mv46j"] Nov 21 15:55:25 crc kubenswrapper[4967]: E1121 15:55:25.370578 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6e85c34-bd76-47aa-b120-3410545d53f0" containerName="mariadb-database-create" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.370646 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6e85c34-bd76-47aa-b120-3410545d53f0" containerName="mariadb-database-create" Nov 21 15:55:25 crc kubenswrapper[4967]: E1121 15:55:25.370739 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9505ac95-12b0-426a-a2b5-42f13ec2fad8" containerName="mariadb-account-create" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.370800 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="9505ac95-12b0-426a-a2b5-42f13ec2fad8" containerName="mariadb-account-create" Nov 21 15:55:25 crc kubenswrapper[4967]: E1121 15:55:25.370881 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1" containerName="mariadb-account-create" 
Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.370958 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1" containerName="mariadb-account-create" Nov 21 15:55:25 crc kubenswrapper[4967]: E1121 15:55:25.371032 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c5930c6-1d89-4ef4-bd96-f290177d2aff" containerName="mariadb-database-create" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.371099 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c5930c6-1d89-4ef4-bd96-f290177d2aff" containerName="mariadb-database-create" Nov 21 15:55:25 crc kubenswrapper[4967]: E1121 15:55:25.371175 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab58e136-893e-4b70-a0a3-d259b234dfcc" containerName="mariadb-database-create" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.371229 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab58e136-893e-4b70-a0a3-d259b234dfcc" containerName="mariadb-database-create" Nov 21 15:55:25 crc kubenswrapper[4967]: E1121 15:55:25.371344 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3813cbde-1076-466c-b72a-94ffa3741ef1" containerName="mariadb-account-create" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.371425 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="3813cbde-1076-466c-b72a-94ffa3741ef1" containerName="mariadb-account-create" Nov 21 15:55:25 crc kubenswrapper[4967]: E1121 15:55:25.371504 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dab33d9-b2f0-4884-97ba-047b7772da9a" containerName="keystone-db-sync" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.371565 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dab33d9-b2f0-4884-97ba-047b7772da9a" containerName="keystone-db-sync" Nov 21 15:55:25 crc kubenswrapper[4967]: E1121 15:55:25.371665 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62996c4f-2e2b-4e94-b60d-1c2962cd7e3f" containerName="mariadb-account-create" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.371732 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="62996c4f-2e2b-4e94-b60d-1c2962cd7e3f" containerName="mariadb-account-create" Nov 21 15:55:25 crc kubenswrapper[4967]: E1121 15:55:25.371819 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a098c7dd-1c69-4c44-a9cb-65c81b00e9e1" containerName="mariadb-database-create" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.371908 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a098c7dd-1c69-4c44-a9cb-65c81b00e9e1" containerName="mariadb-database-create" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.372226 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="a098c7dd-1c69-4c44-a9cb-65c81b00e9e1" containerName="mariadb-database-create" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.372337 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="3813cbde-1076-466c-b72a-94ffa3741ef1" containerName="mariadb-account-create" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.372458 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab58e136-893e-4b70-a0a3-d259b234dfcc" containerName="mariadb-database-create" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.372546 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1" containerName="mariadb-account-create" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 
15:55:25.372620 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c5930c6-1d89-4ef4-bd96-f290177d2aff" containerName="mariadb-database-create" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.373446 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6e85c34-bd76-47aa-b120-3410545d53f0" containerName="mariadb-database-create" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.373533 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="9505ac95-12b0-426a-a2b5-42f13ec2fad8" containerName="mariadb-account-create" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.373614 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="62996c4f-2e2b-4e94-b60d-1c2962cd7e3f" containerName="mariadb-account-create" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.373698 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="0dab33d9-b2f0-4884-97ba-047b7772da9a" containerName="keystone-db-sync" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.387646 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9d85d47c-mv46j"] Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.388004 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.455942 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-f9fq7"] Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.458258 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.465858 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.466181 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.466373 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.466537 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.474943 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-lbgzv" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.513220 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9d85d47c-mv46j\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.513848 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfcvg\" (UniqueName: \"kubernetes.io/projected/9fae67e7-bb2e-4713-bb58-dbdc308377db-kube-api-access-gfcvg\") pod \"dnsmasq-dns-5c9d85d47c-mv46j\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.513968 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: 
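The RemoveStaleState and "Deleted CPUSet assignment" burst above is the kubelet's cpu_manager and memory_manager dropping per-container resource bookkeeping for the finished db-create, account-create and keystone-db-sync pods before the new dnsmasq and keystone-bootstrap pods are admitted. A toy sketch of that keyed cleanup, with hypothetical types rather than the kubelet's real state structures:

```go
// Hypothetical sketch of a RemoveStaleState-style sweep: drop any
// per-container assignment whose pod UID is no longer active. The
// types are illustrative, not the kubelet's actual cpu_manager state.
package main

import "fmt"

type key struct{ podUID, container string }

func removeStaleState(assignments map[key][]int, activePods map[string]bool) {
	for k := range assignments { // deleting during range is safe in Go
		if !activePods[k.podUID] {
			fmt.Printf("removing stale state for %s/%s\n", k.podUID, k.container)
			delete(assignments, k)
		}
	}
}

func main() {
	assignments := map[key][]int{
		{"b6e85c34-bd76-47aa-b120-3410545d53f0", "mariadb-database-create"}: {0, 1},
	}
	removeStaleState(assignments, map[string]bool{}) // no active pods: entry removed
}
```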
\"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-dns-svc\") pod \"dnsmasq-dns-5c9d85d47c-mv46j\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.514052 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9d85d47c-mv46j\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.514477 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-config\") pod \"dnsmasq-dns-5c9d85d47c-mv46j\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.522212 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-f9fq7"] Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.545072 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-96plk"] Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.552164 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-96plk" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.558203 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.562707 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-z24sf" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.590384 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-96plk"] Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.598492 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-pjq5x"] Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.599738 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.605484 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.605743 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.605884 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-wp75j" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.633982 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfcvg\" (UniqueName: \"kubernetes.io/projected/9fae67e7-bb2e-4713-bb58-dbdc308377db-kube-api-access-gfcvg\") pod \"dnsmasq-dns-5c9d85d47c-mv46j\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.634073 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-dns-svc\") pod \"dnsmasq-dns-5c9d85d47c-mv46j\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.634114 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-config-data\") pod \"keystone-bootstrap-f9fq7\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.634177 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9d85d47c-mv46j\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.634201 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-fernet-keys\") pod \"keystone-bootstrap-f9fq7\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.634230 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-combined-ca-bundle\") pod \"keystone-bootstrap-f9fq7\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.634275 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-config\") pod \"dnsmasq-dns-5c9d85d47c-mv46j\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.634498 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-scripts\") pod 
\"keystone-bootstrap-f9fq7\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.634550 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9d85d47c-mv46j\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.634641 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhzc6\" (UniqueName: \"kubernetes.io/projected/2ede99ef-193f-4af6-9180-6b7557463c62-kube-api-access-lhzc6\") pod \"keystone-bootstrap-f9fq7\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.634735 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-credential-keys\") pod \"keystone-bootstrap-f9fq7\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.635365 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9d85d47c-mv46j\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.635450 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-dns-svc\") pod \"dnsmasq-dns-5c9d85d47c-mv46j\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.636109 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-config\") pod \"dnsmasq-dns-5c9d85d47c-mv46j\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.636471 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9d85d47c-mv46j\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.671354 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-pjq5x"] Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.711570 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfcvg\" (UniqueName: \"kubernetes.io/projected/9fae67e7-bb2e-4713-bb58-dbdc308377db-kube-api-access-gfcvg\") pod \"dnsmasq-dns-5c9d85d47c-mv46j\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.739290 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" 
(UniqueName: \"kubernetes.io/host-path/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-etc-machine-id\") pod \"cinder-db-sync-pjq5x\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.739354 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-scripts\") pod \"keystone-bootstrap-f9fq7\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.739380 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-scripts\") pod \"cinder-db-sync-pjq5x\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.739459 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzxxr\" (UniqueName: \"kubernetes.io/projected/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-kube-api-access-hzxxr\") pod \"cinder-db-sync-pjq5x\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.739488 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhzc6\" (UniqueName: \"kubernetes.io/projected/2ede99ef-193f-4af6-9180-6b7557463c62-kube-api-access-lhzc6\") pod \"keystone-bootstrap-f9fq7\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.739531 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0fc2724-5c56-4db8-9a1e-4662761791c3-combined-ca-bundle\") pod \"heat-db-sync-96plk\" (UID: \"e0fc2724-5c56-4db8-9a1e-4662761791c3\") " pod="openstack/heat-db-sync-96plk" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.739549 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0fc2724-5c56-4db8-9a1e-4662761791c3-config-data\") pod \"heat-db-sync-96plk\" (UID: \"e0fc2724-5c56-4db8-9a1e-4662761791c3\") " pod="openstack/heat-db-sync-96plk" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.739579 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-credential-keys\") pod \"keystone-bootstrap-f9fq7\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.739619 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5gzv\" (UniqueName: \"kubernetes.io/projected/e0fc2724-5c56-4db8-9a1e-4662761791c3-kube-api-access-m5gzv\") pod \"heat-db-sync-96plk\" (UID: \"e0fc2724-5c56-4db8-9a1e-4662761791c3\") " pod="openstack/heat-db-sync-96plk" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.742659 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-config-data\") 
pod \"keystone-bootstrap-f9fq7\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.744101 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-fernet-keys\") pod \"keystone-bootstrap-f9fq7\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.744153 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-combined-ca-bundle\") pod \"keystone-bootstrap-f9fq7\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.744264 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-config-data\") pod \"cinder-db-sync-pjq5x\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.744290 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-db-sync-config-data\") pod \"cinder-db-sync-pjq5x\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.744378 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-combined-ca-bundle\") pod \"cinder-db-sync-pjq5x\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.744426 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-scripts\") pod \"keystone-bootstrap-f9fq7\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.748210 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-combined-ca-bundle\") pod \"keystone-bootstrap-f9fq7\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.749889 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.753843 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-fernet-keys\") pod \"keystone-bootstrap-f9fq7\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.761844 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-config-data\") pod \"keystone-bootstrap-f9fq7\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.776890 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhzc6\" (UniqueName: \"kubernetes.io/projected/2ede99ef-193f-4af6-9180-6b7557463c62-kube-api-access-lhzc6\") pod \"keystone-bootstrap-f9fq7\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.815815 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-credential-keys\") pod \"keystone-bootstrap-f9fq7\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.838406 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-r85k5"] Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.841022 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-r85k5" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.848380 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-g4dqx" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.851234 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0fc2724-5c56-4db8-9a1e-4662761791c3-combined-ca-bundle\") pod \"heat-db-sync-96plk\" (UID: \"e0fc2724-5c56-4db8-9a1e-4662761791c3\") " pod="openstack/heat-db-sync-96plk" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.851302 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0fc2724-5c56-4db8-9a1e-4662761791c3-config-data\") pod \"heat-db-sync-96plk\" (UID: \"e0fc2724-5c56-4db8-9a1e-4662761791c3\") " pod="openstack/heat-db-sync-96plk" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.851430 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5gzv\" (UniqueName: \"kubernetes.io/projected/e0fc2724-5c56-4db8-9a1e-4662761791c3-kube-api-access-m5gzv\") pod \"heat-db-sync-96plk\" (UID: \"e0fc2724-5c56-4db8-9a1e-4662761791c3\") " pod="openstack/heat-db-sync-96plk" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.851644 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-config-data\") pod \"cinder-db-sync-pjq5x\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.851691 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-db-sync-config-data\") pod \"cinder-db-sync-pjq5x\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.851748 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-combined-ca-bundle\") pod \"cinder-db-sync-pjq5x\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.851813 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-etc-machine-id\") pod \"cinder-db-sync-pjq5x\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.851947 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-scripts\") pod \"cinder-db-sync-pjq5x\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.852032 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzxxr\" (UniqueName: \"kubernetes.io/projected/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-kube-api-access-hzxxr\") pod \"cinder-db-sync-pjq5x\" (UID: 
\"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.855410 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-etc-machine-id\") pod \"cinder-db-sync-pjq5x\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.861456 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-config-data\") pod \"cinder-db-sync-pjq5x\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.864266 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-db-sync-config-data\") pod \"cinder-db-sync-pjq5x\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.867845 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.877253 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-combined-ca-bundle\") pod \"cinder-db-sync-pjq5x\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.884073 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-scripts\") pod \"cinder-db-sync-pjq5x\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.893108 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5gzv\" (UniqueName: \"kubernetes.io/projected/e0fc2724-5c56-4db8-9a1e-4662761791c3-kube-api-access-m5gzv\") pod \"heat-db-sync-96plk\" (UID: \"e0fc2724-5c56-4db8-9a1e-4662761791c3\") " pod="openstack/heat-db-sync-96plk" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.895880 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0fc2724-5c56-4db8-9a1e-4662761791c3-combined-ca-bundle\") pod \"heat-db-sync-96plk\" (UID: \"e0fc2724-5c56-4db8-9a1e-4662761791c3\") " pod="openstack/heat-db-sync-96plk" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.900472 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9d85d47c-mv46j"] Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.901359 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0fc2724-5c56-4db8-9a1e-4662761791c3-config-data\") pod \"heat-db-sync-96plk\" (UID: \"e0fc2724-5c56-4db8-9a1e-4662761791c3\") " pod="openstack/heat-db-sync-96plk" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.902004 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-96plk" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.920129 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzxxr\" (UniqueName: \"kubernetes.io/projected/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-kube-api-access-hzxxr\") pod \"cinder-db-sync-pjq5x\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.954255 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/dd1dc42f-c657-4dd3-9ca3-e8bc865d6280-db-sync-config-data\") pod \"barbican-db-sync-r85k5\" (UID: \"dd1dc42f-c657-4dd3-9ca3-e8bc865d6280\") " pod="openstack/barbican-db-sync-r85k5" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.954470 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlpds\" (UniqueName: \"kubernetes.io/projected/dd1dc42f-c657-4dd3-9ca3-e8bc865d6280-kube-api-access-mlpds\") pod \"barbican-db-sync-r85k5\" (UID: \"dd1dc42f-c657-4dd3-9ca3-e8bc865d6280\") " pod="openstack/barbican-db-sync-r85k5" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.954507 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd1dc42f-c657-4dd3-9ca3-e8bc865d6280-combined-ca-bundle\") pod \"barbican-db-sync-r85k5\" (UID: \"dd1dc42f-c657-4dd3-9ca3-e8bc865d6280\") " pod="openstack/barbican-db-sync-r85k5" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.959364 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-tcdk8"] Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.961617 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-tcdk8" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.969469 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.972604 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 21 15:55:25 crc kubenswrapper[4967]: I1121 15:55:25.972849 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-rp7j4" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.014709 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.030894 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-r85k5"] Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.046087 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6ffb94d8ff-chtb2"] Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.047992 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.055811 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db63398d-117e-4a60-b548-e1684dbef263-combined-ca-bundle\") pod \"neutron-db-sync-tcdk8\" (UID: \"db63398d-117e-4a60-b548-e1684dbef263\") " pod="openstack/neutron-db-sync-tcdk8" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.055849 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8d95\" (UniqueName: \"kubernetes.io/projected/db63398d-117e-4a60-b548-e1684dbef263-kube-api-access-d8d95\") pod \"neutron-db-sync-tcdk8\" (UID: \"db63398d-117e-4a60-b548-e1684dbef263\") " pod="openstack/neutron-db-sync-tcdk8" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.055930 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlpds\" (UniqueName: \"kubernetes.io/projected/dd1dc42f-c657-4dd3-9ca3-e8bc865d6280-kube-api-access-mlpds\") pod \"barbican-db-sync-r85k5\" (UID: \"dd1dc42f-c657-4dd3-9ca3-e8bc865d6280\") " pod="openstack/barbican-db-sync-r85k5" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.055962 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd1dc42f-c657-4dd3-9ca3-e8bc865d6280-combined-ca-bundle\") pod \"barbican-db-sync-r85k5\" (UID: \"dd1dc42f-c657-4dd3-9ca3-e8bc865d6280\") " pod="openstack/barbican-db-sync-r85k5" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.055995 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/db63398d-117e-4a60-b548-e1684dbef263-config\") pod \"neutron-db-sync-tcdk8\" (UID: \"db63398d-117e-4a60-b548-e1684dbef263\") " pod="openstack/neutron-db-sync-tcdk8" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.056017 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/dd1dc42f-c657-4dd3-9ca3-e8bc865d6280-db-sync-config-data\") pod \"barbican-db-sync-r85k5\" (UID: \"dd1dc42f-c657-4dd3-9ca3-e8bc865d6280\") " pod="openstack/barbican-db-sync-r85k5" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.058499 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-tcdk8"] Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.067527 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd1dc42f-c657-4dd3-9ca3-e8bc865d6280-combined-ca-bundle\") pod \"barbican-db-sync-r85k5\" (UID: \"dd1dc42f-c657-4dd3-9ca3-e8bc865d6280\") " pod="openstack/barbican-db-sync-r85k5" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.070152 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-zb9np"] Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.071386 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/dd1dc42f-c657-4dd3-9ca3-e8bc865d6280-db-sync-config-data\") pod \"barbican-db-sync-r85k5\" (UID: \"dd1dc42f-c657-4dd3-9ca3-e8bc865d6280\") " pod="openstack/barbican-db-sync-r85k5" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.071905 4967 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-zb9np" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.076693 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-8f7vq" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.076853 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.076960 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.088283 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlpds\" (UniqueName: \"kubernetes.io/projected/dd1dc42f-c657-4dd3-9ca3-e8bc865d6280-kube-api-access-mlpds\") pod \"barbican-db-sync-r85k5\" (UID: \"dd1dc42f-c657-4dd3-9ca3-e8bc865d6280\") " pod="openstack/barbican-db-sync-r85k5" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.089041 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6ffb94d8ff-chtb2"] Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.097421 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.116619 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.123167 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.126599 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.131147 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-zb9np"] Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.131502 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.149912 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.152352 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-9fbft" event={"ID":"fc07e264-27b3-4f82-b96e-04ef32de4c2c","Type":"ContainerDied","Data":"853579d280b37c89d9803a752955cf84a2c619a6ee632f40adefbd5e471ac9ad"} Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.152392 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="853579d280b37c89d9803a752955cf84a2c619a6ee632f40adefbd5e471ac9ad" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.158479 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-dns-svc\") pod \"dnsmasq-dns-6ffb94d8ff-chtb2\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.158554 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d0b548b-65d2-496b-a8f0-5556b5e9760e-scripts\") pod \"placement-db-sync-zb9np\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " 
pod="openstack/placement-db-sync-zb9np" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.158591 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/db63398d-117e-4a60-b548-e1684dbef263-config\") pod \"neutron-db-sync-tcdk8\" (UID: \"db63398d-117e-4a60-b548-e1684dbef263\") " pod="openstack/neutron-db-sync-tcdk8" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.158619 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgfw5\" (UniqueName: \"kubernetes.io/projected/6d0b548b-65d2-496b-a8f0-5556b5e9760e-kube-api-access-fgfw5\") pod \"placement-db-sync-zb9np\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " pod="openstack/placement-db-sync-zb9np" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.158668 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d0b548b-65d2-496b-a8f0-5556b5e9760e-config-data\") pod \"placement-db-sync-zb9np\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " pod="openstack/placement-db-sync-zb9np" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.158706 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f87xj\" (UniqueName: \"kubernetes.io/projected/1244a28a-d29b-4cfd-b59a-b8993162ff33-kube-api-access-f87xj\") pod \"dnsmasq-dns-6ffb94d8ff-chtb2\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.158724 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d0b548b-65d2-496b-a8f0-5556b5e9760e-logs\") pod \"placement-db-sync-zb9np\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " pod="openstack/placement-db-sync-zb9np" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.158742 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-config\") pod \"dnsmasq-dns-6ffb94d8ff-chtb2\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.158759 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-ovsdbserver-nb\") pod \"dnsmasq-dns-6ffb94d8ff-chtb2\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.158808 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db63398d-117e-4a60-b548-e1684dbef263-combined-ca-bundle\") pod \"neutron-db-sync-tcdk8\" (UID: \"db63398d-117e-4a60-b548-e1684dbef263\") " pod="openstack/neutron-db-sync-tcdk8" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.158825 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8d95\" (UniqueName: \"kubernetes.io/projected/db63398d-117e-4a60-b548-e1684dbef263-kube-api-access-d8d95\") pod \"neutron-db-sync-tcdk8\" (UID: \"db63398d-117e-4a60-b548-e1684dbef263\") " 
pod="openstack/neutron-db-sync-tcdk8" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.158853 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d0b548b-65d2-496b-a8f0-5556b5e9760e-combined-ca-bundle\") pod \"placement-db-sync-zb9np\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " pod="openstack/placement-db-sync-zb9np" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.158899 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-ovsdbserver-sb\") pod \"dnsmasq-dns-6ffb94d8ff-chtb2\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.171120 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db63398d-117e-4a60-b548-e1684dbef263-combined-ca-bundle\") pod \"neutron-db-sync-tcdk8\" (UID: \"db63398d-117e-4a60-b548-e1684dbef263\") " pod="openstack/neutron-db-sync-tcdk8" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.185012 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/db63398d-117e-4a60-b548-e1684dbef263-config\") pod \"neutron-db-sync-tcdk8\" (UID: \"db63398d-117e-4a60-b548-e1684dbef263\") " pod="openstack/neutron-db-sync-tcdk8" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.198480 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8d95\" (UniqueName: \"kubernetes.io/projected/db63398d-117e-4a60-b548-e1684dbef263-kube-api-access-d8d95\") pod \"neutron-db-sync-tcdk8\" (UID: \"db63398d-117e-4a60-b548-e1684dbef263\") " pod="openstack/neutron-db-sync-tcdk8" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.261424 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qd6c2\" (UniqueName: \"kubernetes.io/projected/a0b6e974-ebcc-4421-879e-d711bd689855-kube-api-access-qd6c2\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.261825 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-dns-svc\") pod \"dnsmasq-dns-6ffb94d8ff-chtb2\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.261900 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d0b548b-65d2-496b-a8f0-5556b5e9760e-scripts\") pod \"placement-db-sync-zb9np\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " pod="openstack/placement-db-sync-zb9np" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.261945 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-scripts\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.261983 4967 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-fgfw5\" (UniqueName: \"kubernetes.io/projected/6d0b548b-65d2-496b-a8f0-5556b5e9760e-kube-api-access-fgfw5\") pod \"placement-db-sync-zb9np\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " pod="openstack/placement-db-sync-zb9np" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.262101 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d0b548b-65d2-496b-a8f0-5556b5e9760e-config-data\") pod \"placement-db-sync-zb9np\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " pod="openstack/placement-db-sync-zb9np" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.262137 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-config-data\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.262170 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0b6e974-ebcc-4421-879e-d711bd689855-log-httpd\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.262194 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0b6e974-ebcc-4421-879e-d711bd689855-run-httpd\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.262227 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f87xj\" (UniqueName: \"kubernetes.io/projected/1244a28a-d29b-4cfd-b59a-b8993162ff33-kube-api-access-f87xj\") pod \"dnsmasq-dns-6ffb94d8ff-chtb2\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.262259 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d0b548b-65d2-496b-a8f0-5556b5e9760e-logs\") pod \"placement-db-sync-zb9np\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " pod="openstack/placement-db-sync-zb9np" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.262284 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-config\") pod \"dnsmasq-dns-6ffb94d8ff-chtb2\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.262301 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-ovsdbserver-nb\") pod \"dnsmasq-dns-6ffb94d8ff-chtb2\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.262433 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.262473 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d0b548b-65d2-496b-a8f0-5556b5e9760e-combined-ca-bundle\") pod \"placement-db-sync-zb9np\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " pod="openstack/placement-db-sync-zb9np" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.262554 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.262620 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-ovsdbserver-sb\") pod \"dnsmasq-dns-6ffb94d8ff-chtb2\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.265155 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-dns-svc\") pod \"dnsmasq-dns-6ffb94d8ff-chtb2\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.265744 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-ovsdbserver-sb\") pod \"dnsmasq-dns-6ffb94d8ff-chtb2\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.266838 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d0b548b-65d2-496b-a8f0-5556b5e9760e-logs\") pod \"placement-db-sync-zb9np\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " pod="openstack/placement-db-sync-zb9np" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.268288 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-ovsdbserver-nb\") pod \"dnsmasq-dns-6ffb94d8ff-chtb2\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.268301 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-config\") pod \"dnsmasq-dns-6ffb94d8ff-chtb2\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.275800 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d0b548b-65d2-496b-a8f0-5556b5e9760e-combined-ca-bundle\") pod \"placement-db-sync-zb9np\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " 
pod="openstack/placement-db-sync-zb9np" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.284951 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d0b548b-65d2-496b-a8f0-5556b5e9760e-config-data\") pod \"placement-db-sync-zb9np\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " pod="openstack/placement-db-sync-zb9np" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.285443 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d0b548b-65d2-496b-a8f0-5556b5e9760e-scripts\") pod \"placement-db-sync-zb9np\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " pod="openstack/placement-db-sync-zb9np" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.290735 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgfw5\" (UniqueName: \"kubernetes.io/projected/6d0b548b-65d2-496b-a8f0-5556b5e9760e-kube-api-access-fgfw5\") pod \"placement-db-sync-zb9np\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " pod="openstack/placement-db-sync-zb9np" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.294083 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f87xj\" (UniqueName: \"kubernetes.io/projected/1244a28a-d29b-4cfd-b59a-b8993162ff33-kube-api-access-f87xj\") pod \"dnsmasq-dns-6ffb94d8ff-chtb2\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.364490 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qd6c2\" (UniqueName: \"kubernetes.io/projected/a0b6e974-ebcc-4421-879e-d711bd689855-kube-api-access-qd6c2\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.364558 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-scripts\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.364627 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-config-data\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.364640 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0b6e974-ebcc-4421-879e-d711bd689855-log-httpd\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.364658 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0b6e974-ebcc-4421-879e-d711bd689855-run-httpd\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.364724 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-sg-core-conf-yaml\") pod 
\"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.364771 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.365689 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0b6e974-ebcc-4421-879e-d711bd689855-log-httpd\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.367942 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0b6e974-ebcc-4421-879e-d711bd689855-run-httpd\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.379992 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.381112 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.393841 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-scripts\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.395280 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-config-data\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.738269 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qd6c2\" (UniqueName: \"kubernetes.io/projected/a0b6e974-ebcc-4421-879e-d711bd689855-kube-api-access-qd6c2\") pod \"ceilometer-0\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " pod="openstack/ceilometer-0" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.884681 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-pjq5x"] Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.919840 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-9fbft" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.953157 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-r85k5" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.963229 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-96plk"] Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.980555 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-tcdk8" Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.989089 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fc07e264-27b3-4f82-b96e-04ef32de4c2c-db-sync-config-data\") pod \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\" (UID: \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\") " Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.989157 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bs4df\" (UniqueName: \"kubernetes.io/projected/fc07e264-27b3-4f82-b96e-04ef32de4c2c-kube-api-access-bs4df\") pod \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\" (UID: \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\") " Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.989353 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc07e264-27b3-4f82-b96e-04ef32de4c2c-config-data\") pod \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\" (UID: \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\") " Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.989434 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc07e264-27b3-4f82-b96e-04ef32de4c2c-combined-ca-bundle\") pod \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\" (UID: \"fc07e264-27b3-4f82-b96e-04ef32de4c2c\") " Nov 21 15:55:26 crc kubenswrapper[4967]: I1121 15:55:26.999655 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" Nov 21 15:55:27 crc kubenswrapper[4967]: I1121 15:55:27.003166 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc07e264-27b3-4f82-b96e-04ef32de4c2c-kube-api-access-bs4df" (OuterVolumeSpecName: "kube-api-access-bs4df") pod "fc07e264-27b3-4f82-b96e-04ef32de4c2c" (UID: "fc07e264-27b3-4f82-b96e-04ef32de4c2c"). InnerVolumeSpecName "kube-api-access-bs4df". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:55:27 crc kubenswrapper[4967]: I1121 15:55:27.010899 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc07e264-27b3-4f82-b96e-04ef32de4c2c-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "fc07e264-27b3-4f82-b96e-04ef32de4c2c" (UID: "fc07e264-27b3-4f82-b96e-04ef32de4c2c"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:55:27 crc kubenswrapper[4967]: I1121 15:55:27.050862 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc07e264-27b3-4f82-b96e-04ef32de4c2c-config-data" (OuterVolumeSpecName: "config-data") pod "fc07e264-27b3-4f82-b96e-04ef32de4c2c" (UID: "fc07e264-27b3-4f82-b96e-04ef32de4c2c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:55:27 crc kubenswrapper[4967]: I1121 15:55:27.055118 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc07e264-27b3-4f82-b96e-04ef32de4c2c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fc07e264-27b3-4f82-b96e-04ef32de4c2c" (UID: "fc07e264-27b3-4f82-b96e-04ef32de4c2c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:55:27 crc kubenswrapper[4967]: I1121 15:55:27.064378 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-zb9np" Nov 21 15:55:27 crc kubenswrapper[4967]: I1121 15:55:27.080689 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:55:27 crc kubenswrapper[4967]: I1121 15:55:27.100577 4967 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fc07e264-27b3-4f82-b96e-04ef32de4c2c-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:27 crc kubenswrapper[4967]: I1121 15:55:27.100782 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bs4df\" (UniqueName: \"kubernetes.io/projected/fc07e264-27b3-4f82-b96e-04ef32de4c2c-kube-api-access-bs4df\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:27 crc kubenswrapper[4967]: I1121 15:55:27.100840 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc07e264-27b3-4f82-b96e-04ef32de4c2c-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:27 crc kubenswrapper[4967]: I1121 15:55:27.100892 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc07e264-27b3-4f82-b96e-04ef32de4c2c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:27 crc kubenswrapper[4967]: I1121 15:55:27.181528 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-9fbft" Nov 21 15:55:27 crc kubenswrapper[4967]: I1121 15:55:27.182755 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9877cab6-ed78-4e94-83c9-b2a127e3b7b0","Type":"ContainerStarted","Data":"e4616f4b0787156b57a45977f1777db91487a460bdb6eb0e544c754dec45d051"} Nov 21 15:55:27 crc kubenswrapper[4967]: I1121 15:55:27.232274 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9d85d47c-mv46j"] Nov 21 15:55:27 crc kubenswrapper[4967]: I1121 15:55:27.348978 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-f9fq7"] Nov 21 15:55:27 crc kubenswrapper[4967]: W1121 15:55:27.634617 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod71cb393a_d56c_4ddc_8bb6_8b7ea26ef9ad.slice/crio-c9719b63508e1ab0ec63e25c022a3ed6b20385114ae64642e88fe44c794d4013 WatchSource:0}: Error finding container c9719b63508e1ab0ec63e25c022a3ed6b20385114ae64642e88fe44c794d4013: Status 404 returned error can't find the container with id c9719b63508e1ab0ec63e25c022a3ed6b20385114ae64642e88fe44c794d4013 Nov 21 15:55:27 crc kubenswrapper[4967]: W1121 15:55:27.638389 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode0fc2724_5c56_4db8_9a1e_4662761791c3.slice/crio-548cc09f86cdde0944471433c3b63539fcb651eefd250fd9c5c58b56f7efdcd5 WatchSource:0}: Error finding container 548cc09f86cdde0944471433c3b63539fcb651eefd250fd9c5c58b56f7efdcd5: Status 404 returned error can't find the container with id 548cc09f86cdde0944471433c3b63539fcb651eefd250fd9c5c58b56f7efdcd5 Nov 21 15:55:28 crc kubenswrapper[4967]: I1121 15:55:28.377719 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-pjq5x" event={"ID":"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad","Type":"ContainerStarted","Data":"c9719b63508e1ab0ec63e25c022a3ed6b20385114ae64642e88fe44c794d4013"} Nov 21 15:55:28 crc kubenswrapper[4967]: I1121 15:55:28.511567 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-f9fq7" event={"ID":"2ede99ef-193f-4af6-9180-6b7557463c62","Type":"ContainerStarted","Data":"abaaf5020f9d023dcfa4f453102bf8600ebeabcad2afb670a9b568689f8d23c9"} Nov 21 15:55:28 crc kubenswrapper[4967]: I1121 15:55:28.535981 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6ffb94d8ff-chtb2"] Nov 21 15:55:28 crc kubenswrapper[4967]: I1121 15:55:28.685780 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-96plk" event={"ID":"e0fc2724-5c56-4db8-9a1e-4662761791c3","Type":"ContainerStarted","Data":"548cc09f86cdde0944471433c3b63539fcb651eefd250fd9c5c58b56f7efdcd5"} Nov 21 15:55:28 crc kubenswrapper[4967]: I1121 15:55:28.700890 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" event={"ID":"9fae67e7-bb2e-4713-bb58-dbdc308377db","Type":"ContainerStarted","Data":"43630a7c337cb7d0c9f68ad24e8831d9fb2541e4870ac7e17f73b91fe4361bc7"} Nov 21 15:55:28 crc kubenswrapper[4967]: I1121 15:55:28.781955 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6ffb94d8ff-chtb2"] Nov 21 15:55:28 crc kubenswrapper[4967]: I1121 15:55:28.911604 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56798b757f-7zmcj"] Nov 21 15:55:28 crc kubenswrapper[4967]: 
E1121 15:55:28.912056 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc07e264-27b3-4f82-b96e-04ef32de4c2c" containerName="glance-db-sync" Nov 21 15:55:28 crc kubenswrapper[4967]: I1121 15:55:28.912076 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc07e264-27b3-4f82-b96e-04ef32de4c2c" containerName="glance-db-sync" Nov 21 15:55:28 crc kubenswrapper[4967]: I1121 15:55:28.912272 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc07e264-27b3-4f82-b96e-04ef32de4c2c" containerName="glance-db-sync" Nov 21 15:55:28 crc kubenswrapper[4967]: I1121 15:55:28.913637 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:55:28 crc kubenswrapper[4967]: I1121 15:55:28.955585 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56798b757f-7zmcj"] Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.012457 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-dns-svc\") pod \"dnsmasq-dns-56798b757f-7zmcj\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.012852 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbfkv\" (UniqueName: \"kubernetes.io/projected/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-kube-api-access-bbfkv\") pod \"dnsmasq-dns-56798b757f-7zmcj\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.012936 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-ovsdbserver-nb\") pod \"dnsmasq-dns-56798b757f-7zmcj\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.013123 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-config\") pod \"dnsmasq-dns-56798b757f-7zmcj\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.013249 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-ovsdbserver-sb\") pod \"dnsmasq-dns-56798b757f-7zmcj\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.035121 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-r85k5"] Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.116617 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-dns-svc\") pod \"dnsmasq-dns-56798b757f-7zmcj\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.116769 4967 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-bbfkv\" (UniqueName: \"kubernetes.io/projected/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-kube-api-access-bbfkv\") pod \"dnsmasq-dns-56798b757f-7zmcj\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.116879 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-ovsdbserver-nb\") pod \"dnsmasq-dns-56798b757f-7zmcj\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.116965 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-config\") pod \"dnsmasq-dns-56798b757f-7zmcj\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.117053 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-ovsdbserver-sb\") pod \"dnsmasq-dns-56798b757f-7zmcj\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.118076 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-ovsdbserver-sb\") pod \"dnsmasq-dns-56798b757f-7zmcj\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.118596 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-dns-svc\") pod \"dnsmasq-dns-56798b757f-7zmcj\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.118763 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-ovsdbserver-nb\") pod \"dnsmasq-dns-56798b757f-7zmcj\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.119234 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-config\") pod \"dnsmasq-dns-56798b757f-7zmcj\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.164626 4967 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.165890 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbfkv\" (UniqueName: \"kubernetes.io/projected/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-kube-api-access-bbfkv\") pod \"dnsmasq-dns-56798b757f-7zmcj\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:55:29 crc kubenswrapper[4967]: 
I1121 15:55:29.239658 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.302945 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-zb9np"] Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.460466 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.664621 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-tcdk8"] Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.761412 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-tcdk8" event={"ID":"db63398d-117e-4a60-b548-e1684dbef263","Type":"ContainerStarted","Data":"b07b691626b627134916d82d90444b4cd84fa91c70bdb709081a35f93e031976"} Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.817264 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9877cab6-ed78-4e94-83c9-b2a127e3b7b0","Type":"ContainerStarted","Data":"8c3e672e4bb685a0cbbd5299843d58c23e2bbd3fac0e801c37a37cfb02fa1f39"} Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.870570 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.872888 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.878650 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.882575 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0b6e974-ebcc-4421-879e-d711bd689855","Type":"ContainerStarted","Data":"c289be9cd5b37960b44f7038fc7dd73dcd099930d83da135fbf1051b63e7f943"} Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.882839 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.883017 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-2sd9k" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.892135 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-f9fq7" event={"ID":"2ede99ef-193f-4af6-9180-6b7557463c62","Type":"ContainerStarted","Data":"b90e64f0a84c9b72beaa4635b77929bc0f532a39d96eb6cb1d1d2d82efa5f075"} Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.897014 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=19.89698503 podStartE2EDuration="19.89698503s" podCreationTimestamp="2025-11-21 15:55:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:55:29.859838705 +0000 UTC m=+1218.118359723" watchObservedRunningTime="2025-11-21 15:55:29.89698503 +0000 UTC m=+1218.155506038" Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.914448 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-zb9np" 
event={"ID":"6d0b548b-65d2-496b-a8f0-5556b5e9760e","Type":"ContainerStarted","Data":"88549493ac0fdb7848faaa622469a99b465e3a9f77d4470edef43ab83a865848"} Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.958132 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-r85k5" event={"ID":"dd1dc42f-c657-4dd3-9ca3-e8bc865d6280","Type":"ContainerStarted","Data":"957a4d9e3cf597ef5af2e2be4b081fc187f4798058301fba046bf533c2bc369b"} Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.958193 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 15:55:29 crc kubenswrapper[4967]: I1121 15:55:29.992232 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" event={"ID":"1244a28a-d29b-4cfd-b59a-b8993162ff33","Type":"ContainerStarted","Data":"8326f3f87abdca33d5833c8a4662777d0fa037dbee81ac4f6aa4a8c5062aa72e"} Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.052771 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.052836 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4cdz\" (UniqueName: \"kubernetes.io/projected/412d7d72-53e5-43e5-a85d-0fef4785dda1-kube-api-access-w4cdz\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.052867 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/412d7d72-53e5-43e5-a85d-0fef4785dda1-scripts\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.052754 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-f9fq7" podStartSLOduration=5.052730434 podStartE2EDuration="5.052730434s" podCreationTimestamp="2025-11-21 15:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:55:29.931789848 +0000 UTC m=+1218.190310856" watchObservedRunningTime="2025-11-21 15:55:30.052730434 +0000 UTC m=+1218.311251442" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.053082 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/412d7d72-53e5-43e5-a85d-0fef4785dda1-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.053108 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/412d7d72-53e5-43e5-a85d-0fef4785dda1-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc 
kubenswrapper[4967]: I1121 15:55:30.053157 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/412d7d72-53e5-43e5-a85d-0fef4785dda1-config-data\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.053176 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/412d7d72-53e5-43e5-a85d-0fef4785dda1-logs\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.089692 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56798b757f-7zmcj"] Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.105099 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2","Type":"ContainerStarted","Data":"bae895d31ffb6d7d0e7e13744f4acc4568d5bb9096290eb78cb691d93effcc29"} Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.105528 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.109161 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.112716 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.119076 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.155479 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/412d7d72-53e5-43e5-a85d-0fef4785dda1-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.155524 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/412d7d72-53e5-43e5-a85d-0fef4785dda1-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.155576 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/412d7d72-53e5-43e5-a85d-0fef4785dda1-config-data\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.155596 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/412d7d72-53e5-43e5-a85d-0fef4785dda1-logs\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.155649 4967 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.155675 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4cdz\" (UniqueName: \"kubernetes.io/projected/412d7d72-53e5-43e5-a85d-0fef4785dda1-kube-api-access-w4cdz\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.155699 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/412d7d72-53e5-43e5-a85d-0fef4785dda1-scripts\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.161400 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/412d7d72-53e5-43e5-a85d-0fef4785dda1-scripts\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.161710 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/412d7d72-53e5-43e5-a85d-0fef4785dda1-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.162014 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/412d7d72-53e5-43e5-a85d-0fef4785dda1-logs\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.162077 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.162404 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.162440 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/412d7d72-53e5-43e5-a85d-0fef4785dda1-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.167126 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/412d7d72-53e5-43e5-a85d-0fef4785dda1-config-data\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.209924 4967 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4cdz\" (UniqueName: \"kubernetes.io/projected/412d7d72-53e5-43e5-a85d-0fef4785dda1-kube-api-access-w4cdz\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.261042 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad125b00-796b-4208-a2ad-8488ec4d2fd9-logs\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.261177 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad125b00-796b-4208-a2ad-8488ec4d2fd9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.261203 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9qx4x\" (UniqueName: \"kubernetes.io/projected/ad125b00-796b-4208-a2ad-8488ec4d2fd9-kube-api-access-9qx4x\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.269783 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad125b00-796b-4208-a2ad-8488ec4d2fd9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.269921 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ad125b00-796b-4208-a2ad-8488ec4d2fd9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.270097 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.270157 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad125b00-796b-4208-a2ad-8488ec4d2fd9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.311438 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 
15:55:30.371608 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad125b00-796b-4208-a2ad-8488ec4d2fd9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.371851 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ad125b00-796b-4208-a2ad-8488ec4d2fd9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.371969 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.372051 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad125b00-796b-4208-a2ad-8488ec4d2fd9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.372169 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad125b00-796b-4208-a2ad-8488ec4d2fd9-logs\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.372276 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad125b00-796b-4208-a2ad-8488ec4d2fd9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.372425 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9qx4x\" (UniqueName: \"kubernetes.io/projected/ad125b00-796b-4208-a2ad-8488ec4d2fd9-kube-api-access-9qx4x\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.373167 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ad125b00-796b-4208-a2ad-8488ec4d2fd9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.374521 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad125b00-796b-4208-a2ad-8488ec4d2fd9-logs\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.375927 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.377813 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad125b00-796b-4208-a2ad-8488ec4d2fd9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.380204 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad125b00-796b-4208-a2ad-8488ec4d2fd9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.390464 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad125b00-796b-4208-a2ad-8488ec4d2fd9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.423051 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9qx4x\" (UniqueName: \"kubernetes.io/projected/ad125b00-796b-4208-a2ad-8488ec4d2fd9-kube-api-access-9qx4x\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.425566 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.805051 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 15:55:30 crc kubenswrapper[4967]: I1121 15:55:30.955214 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 15:55:31 crc kubenswrapper[4967]: I1121 15:55:31.135951 4967 generic.go:334] "Generic (PLEG): container finished" podID="9fae67e7-bb2e-4713-bb58-dbdc308377db" containerID="16a72adf1a9b21a77ea74af9044ec17175464d60b70cf2520c600d2e8ab295c5" exitCode=0 Nov 21 15:55:31 crc kubenswrapper[4967]: I1121 15:55:31.136164 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" event={"ID":"9fae67e7-bb2e-4713-bb58-dbdc308377db","Type":"ContainerDied","Data":"16a72adf1a9b21a77ea74af9044ec17175464d60b70cf2520c600d2e8ab295c5"} Nov 21 15:55:31 crc kubenswrapper[4967]: I1121 15:55:31.149037 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56798b757f-7zmcj" event={"ID":"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99","Type":"ContainerStarted","Data":"fb4eda99c574d13cf23bb6d080e1b5323d2f357e0502c0f5d3cffdce14fda2ea"} Nov 21 15:55:31 crc kubenswrapper[4967]: I1121 15:55:31.532547 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:31 crc kubenswrapper[4967]: I1121 15:55:31.593253 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 15:55:31 crc kubenswrapper[4967]: I1121 15:55:31.886263 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" Nov 21 15:55:31 crc kubenswrapper[4967]: I1121 15:55:31.933302 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-config\") pod \"9fae67e7-bb2e-4713-bb58-dbdc308377db\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " Nov 21 15:55:31 crc kubenswrapper[4967]: I1121 15:55:31.933710 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfcvg\" (UniqueName: \"kubernetes.io/projected/9fae67e7-bb2e-4713-bb58-dbdc308377db-kube-api-access-gfcvg\") pod \"9fae67e7-bb2e-4713-bb58-dbdc308377db\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " Nov 21 15:55:31 crc kubenswrapper[4967]: I1121 15:55:31.933779 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-dns-svc\") pod \"9fae67e7-bb2e-4713-bb58-dbdc308377db\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " Nov 21 15:55:31 crc kubenswrapper[4967]: I1121 15:55:31.933908 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-ovsdbserver-nb\") pod \"9fae67e7-bb2e-4713-bb58-dbdc308377db\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " Nov 21 15:55:31 crc kubenswrapper[4967]: I1121 15:55:31.933993 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-ovsdbserver-sb\") pod \"9fae67e7-bb2e-4713-bb58-dbdc308377db\" (UID: \"9fae67e7-bb2e-4713-bb58-dbdc308377db\") " Nov 21 15:55:31 crc kubenswrapper[4967]: I1121 15:55:31.942899 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9fae67e7-bb2e-4713-bb58-dbdc308377db-kube-api-access-gfcvg" (OuterVolumeSpecName: "kube-api-access-gfcvg") pod "9fae67e7-bb2e-4713-bb58-dbdc308377db" 
(UID: "9fae67e7-bb2e-4713-bb58-dbdc308377db"). InnerVolumeSpecName "kube-api-access-gfcvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:55:31 crc kubenswrapper[4967]: I1121 15:55:31.947638 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 15:55:31 crc kubenswrapper[4967]: I1121 15:55:31.972891 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9fae67e7-bb2e-4713-bb58-dbdc308377db" (UID: "9fae67e7-bb2e-4713-bb58-dbdc308377db"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:31 crc kubenswrapper[4967]: I1121 15:55:31.975546 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-config" (OuterVolumeSpecName: "config") pod "9fae67e7-bb2e-4713-bb58-dbdc308377db" (UID: "9fae67e7-bb2e-4713-bb58-dbdc308377db"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:31 crc kubenswrapper[4967]: I1121 15:55:31.984118 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9fae67e7-bb2e-4713-bb58-dbdc308377db" (UID: "9fae67e7-bb2e-4713-bb58-dbdc308377db"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:31 crc kubenswrapper[4967]: I1121 15:55:31.994867 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9fae67e7-bb2e-4713-bb58-dbdc308377db" (UID: "9fae67e7-bb2e-4713-bb58-dbdc308377db"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:32 crc kubenswrapper[4967]: I1121 15:55:32.036516 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:32 crc kubenswrapper[4967]: I1121 15:55:32.036550 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:32 crc kubenswrapper[4967]: I1121 15:55:32.036563 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:32 crc kubenswrapper[4967]: I1121 15:55:32.036577 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfcvg\" (UniqueName: \"kubernetes.io/projected/9fae67e7-bb2e-4713-bb58-dbdc308377db-kube-api-access-gfcvg\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:32 crc kubenswrapper[4967]: I1121 15:55:32.036591 4967 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9fae67e7-bb2e-4713-bb58-dbdc308377db-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:32 crc kubenswrapper[4967]: I1121 15:55:32.161277 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56798b757f-7zmcj" event={"ID":"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99","Type":"ContainerStarted","Data":"00cbdcae1c898b75b060b409434501f57a759647179f1d69842992ee69aa8331"} Nov 21 15:55:32 crc kubenswrapper[4967]: I1121 15:55:32.165197 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" event={"ID":"1244a28a-d29b-4cfd-b59a-b8993162ff33","Type":"ContainerStarted","Data":"dfa179145dc161469623cdc73c147442212d81c2c5668af1bf7f8235fc1fe992"} Nov 21 15:55:32 crc kubenswrapper[4967]: I1121 15:55:32.165385 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" podUID="1244a28a-d29b-4cfd-b59a-b8993162ff33" containerName="init" containerID="cri-o://dfa179145dc161469623cdc73c147442212d81c2c5668af1bf7f8235fc1fe992" gracePeriod=10 Nov 21 15:55:32 crc kubenswrapper[4967]: I1121 15:55:32.179028 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"412d7d72-53e5-43e5-a85d-0fef4785dda1","Type":"ContainerStarted","Data":"5b4c2f83ba2a7e39675da85302fcd32511837f30f18460b59c0fb775d285f90a"} Nov 21 15:55:32 crc kubenswrapper[4967]: I1121 15:55:32.197578 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2","Type":"ContainerStarted","Data":"46ff87a0fb7e4ab5e4ca13b1cdf0e93409cfcdc897222d4ed4d0c02fcb82e244"} Nov 21 15:55:32 crc kubenswrapper[4967]: I1121 15:55:32.200111 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-tcdk8" event={"ID":"db63398d-117e-4a60-b548-e1684dbef263","Type":"ContainerStarted","Data":"5a68a1617fd748c8f26d252405e183a72786bedb5ce9ad831364ec556cf7f9c8"} Nov 21 15:55:32 crc kubenswrapper[4967]: I1121 15:55:32.202519 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" 
event={"ID":"9fae67e7-bb2e-4713-bb58-dbdc308377db","Type":"ContainerDied","Data":"43630a7c337cb7d0c9f68ad24e8831d9fb2541e4870ac7e17f73b91fe4361bc7"} Nov 21 15:55:32 crc kubenswrapper[4967]: I1121 15:55:32.202567 4967 scope.go:117] "RemoveContainer" containerID="16a72adf1a9b21a77ea74af9044ec17175464d60b70cf2520c600d2e8ab295c5" Nov 21 15:55:32 crc kubenswrapper[4967]: I1121 15:55:32.202708 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9d85d47c-mv46j" Nov 21 15:55:32 crc kubenswrapper[4967]: I1121 15:55:32.224901 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ad125b00-796b-4208-a2ad-8488ec4d2fd9","Type":"ContainerStarted","Data":"c4fb1ee1d2f2d04df192a33ffd9db2ab2b36f57234766c5caf71acb19a62c594"} Nov 21 15:55:32 crc kubenswrapper[4967]: I1121 15:55:32.300826 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9d85d47c-mv46j"] Nov 21 15:55:32 crc kubenswrapper[4967]: I1121 15:55:32.313133 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c9d85d47c-mv46j"] Nov 21 15:55:32 crc kubenswrapper[4967]: I1121 15:55:32.566903 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9fae67e7-bb2e-4713-bb58-dbdc308377db" path="/var/lib/kubelet/pods/9fae67e7-bb2e-4713-bb58-dbdc308377db/volumes" Nov 21 15:55:33 crc kubenswrapper[4967]: I1121 15:55:33.238680 4967 generic.go:334] "Generic (PLEG): container finished" podID="1244a28a-d29b-4cfd-b59a-b8993162ff33" containerID="dfa179145dc161469623cdc73c147442212d81c2c5668af1bf7f8235fc1fe992" exitCode=0 Nov 21 15:55:33 crc kubenswrapper[4967]: I1121 15:55:33.238759 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" event={"ID":"1244a28a-d29b-4cfd-b59a-b8993162ff33","Type":"ContainerDied","Data":"dfa179145dc161469623cdc73c147442212d81c2c5668af1bf7f8235fc1fe992"} Nov 21 15:55:33 crc kubenswrapper[4967]: I1121 15:55:33.241796 4967 generic.go:334] "Generic (PLEG): container finished" podID="2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99" containerID="00cbdcae1c898b75b060b409434501f57a759647179f1d69842992ee69aa8331" exitCode=0 Nov 21 15:55:33 crc kubenswrapper[4967]: I1121 15:55:33.243806 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56798b757f-7zmcj" event={"ID":"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99","Type":"ContainerDied","Data":"00cbdcae1c898b75b060b409434501f57a759647179f1d69842992ee69aa8331"} Nov 21 15:55:33 crc kubenswrapper[4967]: I1121 15:55:33.297242 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-tcdk8" podStartSLOduration=8.2972234 podStartE2EDuration="8.2972234s" podCreationTimestamp="2025-11-21 15:55:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:55:33.290210889 +0000 UTC m=+1221.548731907" watchObservedRunningTime="2025-11-21 15:55:33.2972234 +0000 UTC m=+1221.555744408" Nov 21 15:55:34 crc kubenswrapper[4967]: I1121 15:55:34.289131 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2","Type":"ContainerStarted","Data":"93387223fa59879df780752b85701a79bc54d8c11400ba6e9faa3fdf97cfc55c"} Nov 21 15:55:34 crc kubenswrapper[4967]: I1121 15:55:34.426053 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" Nov 21 15:55:34 crc kubenswrapper[4967]: I1121 15:55:34.540935 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-ovsdbserver-nb\") pod \"1244a28a-d29b-4cfd-b59a-b8993162ff33\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " Nov 21 15:55:34 crc kubenswrapper[4967]: I1121 15:55:34.541025 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-config\") pod \"1244a28a-d29b-4cfd-b59a-b8993162ff33\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " Nov 21 15:55:34 crc kubenswrapper[4967]: I1121 15:55:34.541229 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-ovsdbserver-sb\") pod \"1244a28a-d29b-4cfd-b59a-b8993162ff33\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " Nov 21 15:55:34 crc kubenswrapper[4967]: I1121 15:55:34.541423 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-dns-svc\") pod \"1244a28a-d29b-4cfd-b59a-b8993162ff33\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " Nov 21 15:55:34 crc kubenswrapper[4967]: I1121 15:55:34.541557 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f87xj\" (UniqueName: \"kubernetes.io/projected/1244a28a-d29b-4cfd-b59a-b8993162ff33-kube-api-access-f87xj\") pod \"1244a28a-d29b-4cfd-b59a-b8993162ff33\" (UID: \"1244a28a-d29b-4cfd-b59a-b8993162ff33\") " Nov 21 15:55:34 crc kubenswrapper[4967]: I1121 15:55:34.549216 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1244a28a-d29b-4cfd-b59a-b8993162ff33-kube-api-access-f87xj" (OuterVolumeSpecName: "kube-api-access-f87xj") pod "1244a28a-d29b-4cfd-b59a-b8993162ff33" (UID: "1244a28a-d29b-4cfd-b59a-b8993162ff33"). InnerVolumeSpecName "kube-api-access-f87xj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:55:34 crc kubenswrapper[4967]: I1121 15:55:34.574588 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1244a28a-d29b-4cfd-b59a-b8993162ff33" (UID: "1244a28a-d29b-4cfd-b59a-b8993162ff33"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:34 crc kubenswrapper[4967]: I1121 15:55:34.574785 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1244a28a-d29b-4cfd-b59a-b8993162ff33" (UID: "1244a28a-d29b-4cfd-b59a-b8993162ff33"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:34 crc kubenswrapper[4967]: I1121 15:55:34.582747 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1244a28a-d29b-4cfd-b59a-b8993162ff33" (UID: "1244a28a-d29b-4cfd-b59a-b8993162ff33"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:34 crc kubenswrapper[4967]: I1121 15:55:34.593387 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-config" (OuterVolumeSpecName: "config") pod "1244a28a-d29b-4cfd-b59a-b8993162ff33" (UID: "1244a28a-d29b-4cfd-b59a-b8993162ff33"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:55:34 crc kubenswrapper[4967]: I1121 15:55:34.645487 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f87xj\" (UniqueName: \"kubernetes.io/projected/1244a28a-d29b-4cfd-b59a-b8993162ff33-kube-api-access-f87xj\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:34 crc kubenswrapper[4967]: I1121 15:55:34.645529 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:34 crc kubenswrapper[4967]: I1121 15:55:34.645540 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:34 crc kubenswrapper[4967]: I1121 15:55:34.645548 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:34 crc kubenswrapper[4967]: I1121 15:55:34.645556 4967 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1244a28a-d29b-4cfd-b59a-b8993162ff33-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 15:55:35 crc kubenswrapper[4967]: I1121 15:55:35.317428 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ad125b00-796b-4208-a2ad-8488ec4d2fd9","Type":"ContainerStarted","Data":"ecc74cb8693230dc26656497af421cafabdd6fd870605a42100a83b90ab895a7"} Nov 21 15:55:35 crc kubenswrapper[4967]: I1121 15:55:35.321965 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" event={"ID":"1244a28a-d29b-4cfd-b59a-b8993162ff33","Type":"ContainerDied","Data":"8326f3f87abdca33d5833c8a4662777d0fa037dbee81ac4f6aa4a8c5062aa72e"} Nov 21 15:55:35 crc kubenswrapper[4967]: I1121 15:55:35.322025 4967 scope.go:117] "RemoveContainer" containerID="dfa179145dc161469623cdc73c147442212d81c2c5668af1bf7f8235fc1fe992" Nov 21 15:55:35 crc kubenswrapper[4967]: I1121 15:55:35.322184 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6ffb94d8ff-chtb2" Nov 21 15:55:35 crc kubenswrapper[4967]: I1121 15:55:35.337549 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"412d7d72-53e5-43e5-a85d-0fef4785dda1","Type":"ContainerStarted","Data":"e18249eeeb2c1b5c3d93d87f40580ca99ad5ae82a59aac306906185d19fed559"} Nov 21 15:55:35 crc kubenswrapper[4967]: I1121 15:55:35.398098 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6ffb94d8ff-chtb2"] Nov 21 15:55:35 crc kubenswrapper[4967]: I1121 15:55:35.411024 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6ffb94d8ff-chtb2"] Nov 21 15:55:36 crc kubenswrapper[4967]: I1121 15:55:36.374477 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56798b757f-7zmcj" event={"ID":"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99","Type":"ContainerStarted","Data":"3bfc8fac75006f89d9cb64fef1803839d9d486d0b5248f9b30304a8f973d2c63"} Nov 21 15:55:36 crc kubenswrapper[4967]: I1121 15:55:36.385053 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 15:55:36 crc kubenswrapper[4967]: I1121 15:55:36.477464 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 15:55:36 crc kubenswrapper[4967]: I1121 15:55:36.554039 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1244a28a-d29b-4cfd-b59a-b8993162ff33" path="/var/lib/kubelet/pods/1244a28a-d29b-4cfd-b59a-b8993162ff33/volumes" Nov 21 15:55:37 crc kubenswrapper[4967]: I1121 15:55:37.385423 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:55:37 crc kubenswrapper[4967]: I1121 15:55:37.416767 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56798b757f-7zmcj" podStartSLOduration=9.416747895 podStartE2EDuration="9.416747895s" podCreationTimestamp="2025-11-21 15:55:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:55:37.406521342 +0000 UTC m=+1225.665042350" watchObservedRunningTime="2025-11-21 15:55:37.416747895 +0000 UTC m=+1225.675268903" Nov 21 15:55:38 crc kubenswrapper[4967]: I1121 15:55:38.430094 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ad125b00-796b-4208-a2ad-8488ec4d2fd9","Type":"ContainerStarted","Data":"2048e45ca359ca990315d60aad0e3858897f1fc024089e5ba27f529f81baa508"} Nov 21 15:55:38 crc kubenswrapper[4967]: I1121 15:55:38.433665 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"412d7d72-53e5-43e5-a85d-0fef4785dda1","Type":"ContainerStarted","Data":"522da33baa379acfe695485250ed4e2e3974128641163970b942d3b0ae6844df"} Nov 21 15:55:38 crc kubenswrapper[4967]: I1121 15:55:38.441460 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2","Type":"ContainerStarted","Data":"1b0df6dc9d09203072ec2af3e8f498bb27b92126c5a1c5c2f163a54cc02b7ad1"} Nov 21 15:55:39 crc kubenswrapper[4967]: I1121 15:55:39.462156 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="ad125b00-796b-4208-a2ad-8488ec4d2fd9" containerName="glance-log" 
containerID="cri-o://ecc74cb8693230dc26656497af421cafabdd6fd870605a42100a83b90ab895a7" gracePeriod=30 Nov 21 15:55:39 crc kubenswrapper[4967]: I1121 15:55:39.462723 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2","Type":"ContainerStarted","Data":"c1ee4e0caa16abaff827baa9e10b8a2b3727ab78b86729049dc3157b60abeba7"} Nov 21 15:55:39 crc kubenswrapper[4967]: I1121 15:55:39.462756 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="ad125b00-796b-4208-a2ad-8488ec4d2fd9" containerName="glance-httpd" containerID="cri-o://2048e45ca359ca990315d60aad0e3858897f1fc024089e5ba27f529f81baa508" gracePeriod=30 Nov 21 15:55:39 crc kubenswrapper[4967]: I1121 15:55:39.462842 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="412d7d72-53e5-43e5-a85d-0fef4785dda1" containerName="glance-log" containerID="cri-o://e18249eeeb2c1b5c3d93d87f40580ca99ad5ae82a59aac306906185d19fed559" gracePeriod=30 Nov 21 15:55:39 crc kubenswrapper[4967]: I1121 15:55:39.462879 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="412d7d72-53e5-43e5-a85d-0fef4785dda1" containerName="glance-httpd" containerID="cri-o://522da33baa379acfe695485250ed4e2e3974128641163970b942d3b0ae6844df" gracePeriod=30 Nov 21 15:55:39 crc kubenswrapper[4967]: I1121 15:55:39.499622 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=10.499596636 podStartE2EDuration="10.499596636s" podCreationTimestamp="2025-11-21 15:55:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:55:39.498973778 +0000 UTC m=+1227.757494786" watchObservedRunningTime="2025-11-21 15:55:39.499596636 +0000 UTC m=+1227.758117654" Nov 21 15:55:39 crc kubenswrapper[4967]: I1121 15:55:39.521053 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=10.52102108 podStartE2EDuration="10.52102108s" podCreationTimestamp="2025-11-21 15:55:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:55:39.51995749 +0000 UTC m=+1227.778478498" watchObservedRunningTime="2025-11-21 15:55:39.52102108 +0000 UTC m=+1227.779542088" Nov 21 15:55:40 crc kubenswrapper[4967]: I1121 15:55:40.481113 4967 generic.go:334] "Generic (PLEG): container finished" podID="ad125b00-796b-4208-a2ad-8488ec4d2fd9" containerID="2048e45ca359ca990315d60aad0e3858897f1fc024089e5ba27f529f81baa508" exitCode=0 Nov 21 15:55:40 crc kubenswrapper[4967]: I1121 15:55:40.481477 4967 generic.go:334] "Generic (PLEG): container finished" podID="ad125b00-796b-4208-a2ad-8488ec4d2fd9" containerID="ecc74cb8693230dc26656497af421cafabdd6fd870605a42100a83b90ab895a7" exitCode=143 Nov 21 15:55:40 crc kubenswrapper[4967]: I1121 15:55:40.481547 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ad125b00-796b-4208-a2ad-8488ec4d2fd9","Type":"ContainerDied","Data":"2048e45ca359ca990315d60aad0e3858897f1fc024089e5ba27f529f81baa508"} Nov 21 15:55:40 crc kubenswrapper[4967]: I1121 15:55:40.481576 4967 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ad125b00-796b-4208-a2ad-8488ec4d2fd9","Type":"ContainerDied","Data":"ecc74cb8693230dc26656497af421cafabdd6fd870605a42100a83b90ab895a7"} Nov 21 15:55:40 crc kubenswrapper[4967]: I1121 15:55:40.485038 4967 generic.go:334] "Generic (PLEG): container finished" podID="412d7d72-53e5-43e5-a85d-0fef4785dda1" containerID="522da33baa379acfe695485250ed4e2e3974128641163970b942d3b0ae6844df" exitCode=0 Nov 21 15:55:40 crc kubenswrapper[4967]: I1121 15:55:40.485180 4967 generic.go:334] "Generic (PLEG): container finished" podID="412d7d72-53e5-43e5-a85d-0fef4785dda1" containerID="e18249eeeb2c1b5c3d93d87f40580ca99ad5ae82a59aac306906185d19fed559" exitCode=143 Nov 21 15:55:40 crc kubenswrapper[4967]: I1121 15:55:40.485148 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"412d7d72-53e5-43e5-a85d-0fef4785dda1","Type":"ContainerDied","Data":"522da33baa379acfe695485250ed4e2e3974128641163970b942d3b0ae6844df"} Nov 21 15:55:40 crc kubenswrapper[4967]: I1121 15:55:40.485382 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"412d7d72-53e5-43e5-a85d-0fef4785dda1","Type":"ContainerDied","Data":"e18249eeeb2c1b5c3d93d87f40580ca99ad5ae82a59aac306906185d19fed559"} Nov 21 15:55:41 crc kubenswrapper[4967]: I1121 15:55:41.531671 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:41 crc kubenswrapper[4967]: I1121 15:55:41.544577 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:42 crc kubenswrapper[4967]: I1121 15:55:42.506544 4967 generic.go:334] "Generic (PLEG): container finished" podID="2ede99ef-193f-4af6-9180-6b7557463c62" containerID="b90e64f0a84c9b72beaa4635b77929bc0f532a39d96eb6cb1d1d2d82efa5f075" exitCode=0 Nov 21 15:55:42 crc kubenswrapper[4967]: I1121 15:55:42.506629 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-f9fq7" event={"ID":"2ede99ef-193f-4af6-9180-6b7557463c62","Type":"ContainerDied","Data":"b90e64f0a84c9b72beaa4635b77929bc0f532a39d96eb6cb1d1d2d82efa5f075"} Nov 21 15:55:42 crc kubenswrapper[4967]: I1121 15:55:42.512905 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 21 15:55:44 crc kubenswrapper[4967]: I1121 15:55:44.240484 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:55:44 crc kubenswrapper[4967]: I1121 15:55:44.304298 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-rdtt2"] Nov 21 15:55:44 crc kubenswrapper[4967]: I1121 15:55:44.304553 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" podUID="3678c17b-6120-4063-b84d-7cb362b46b62" containerName="dnsmasq-dns" containerID="cri-o://d00e911b5f610723bdfe86e653cc8ee024c662cb4968b26abf6b9606993f8579" gracePeriod=10 Nov 21 15:55:46 crc kubenswrapper[4967]: I1121 15:55:46.546352 4967 generic.go:334] "Generic (PLEG): container finished" podID="3678c17b-6120-4063-b84d-7cb362b46b62" containerID="d00e911b5f610723bdfe86e653cc8ee024c662cb4968b26abf6b9606993f8579" exitCode=0 Nov 21 15:55:46 crc kubenswrapper[4967]: I1121 15:55:46.549704 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" event={"ID":"3678c17b-6120-4063-b84d-7cb362b46b62","Type":"ContainerDied","Data":"d00e911b5f610723bdfe86e653cc8ee024c662cb4968b26abf6b9606993f8579"} Nov 21 15:55:46 crc kubenswrapper[4967]: I1121 15:55:46.586999 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" podUID="3678c17b-6120-4063-b84d-7cb362b46b62" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.153:5353: connect: connection refused" Nov 21 15:55:51 crc kubenswrapper[4967]: I1121 15:55:51.586713 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" podUID="3678c17b-6120-4063-b84d-7cb362b46b62" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.153:5353: connect: connection refused" Nov 21 15:55:54 crc kubenswrapper[4967]: E1121 15:55:54.059363 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Nov 21 15:55:54 crc kubenswrapper[4967]: E1121 15:55:54.060024 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fgfw5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-zb9np_openstack(6d0b548b-65d2-496b-a8f0-5556b5e9760e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 15:55:54 crc 
kubenswrapper[4967]: E1121 15:55:54.061206 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-zb9np" podUID="6d0b548b-65d2-496b-a8f0-5556b5e9760e" Nov 21 15:55:54 crc kubenswrapper[4967]: E1121 15:55:54.635412 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-zb9np" podUID="6d0b548b-65d2-496b-a8f0-5556b5e9760e" Nov 21 15:55:56 crc kubenswrapper[4967]: I1121 15:55:56.585973 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" podUID="3678c17b-6120-4063-b84d-7cb362b46b62" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.153:5353: connect: connection refused" Nov 21 15:55:56 crc kubenswrapper[4967]: I1121 15:55:56.586289 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" Nov 21 15:56:00 crc kubenswrapper[4967]: I1121 15:56:00.807093 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 21 15:56:00 crc kubenswrapper[4967]: I1121 15:56:00.807419 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 21 15:56:00 crc kubenswrapper[4967]: I1121 15:56:00.956187 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 21 15:56:00 crc kubenswrapper[4967]: I1121 15:56:00.956251 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 21 15:56:01 crc kubenswrapper[4967]: I1121 15:56:01.586437 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" podUID="3678c17b-6120-4063-b84d-7cb362b46b62" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.153:5353: connect: connection refused" Nov 21 15:56:06 crc kubenswrapper[4967]: I1121 15:56:06.586713 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" podUID="3678c17b-6120-4063-b84d-7cb362b46b62" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.153:5353: connect: connection refused" Nov 21 15:56:11 crc kubenswrapper[4967]: I1121 15:56:11.586397 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" podUID="3678c17b-6120-4063-b84d-7cb362b46b62" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.153:5353: connect: connection refused" Nov 21 15:56:11 crc kubenswrapper[4967]: I1121 15:56:11.878148 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:56:11 crc kubenswrapper[4967]: E1121 15:56:11.924224 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 21 15:56:11 crc kubenswrapper[4967]: E1121 15:56:11.924455 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hzxxr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-pjq5x_openstack(71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 15:56:11 crc kubenswrapper[4967]: E1121 15:56:11.925605 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-pjq5x" podUID="71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.027562 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-f9fq7" 
event={"ID":"2ede99ef-193f-4af6-9180-6b7557463c62","Type":"ContainerDied","Data":"abaaf5020f9d023dcfa4f453102bf8600ebeabcad2afb670a9b568689f8d23c9"} Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.027608 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="abaaf5020f9d023dcfa4f453102bf8600ebeabcad2afb670a9b568689f8d23c9" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.027768 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-f9fq7" Nov 21 15:56:12 crc kubenswrapper[4967]: E1121 15:56:12.029536 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-pjq5x" podUID="71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.079662 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lhzc6\" (UniqueName: \"kubernetes.io/projected/2ede99ef-193f-4af6-9180-6b7557463c62-kube-api-access-lhzc6\") pod \"2ede99ef-193f-4af6-9180-6b7557463c62\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.079779 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-combined-ca-bundle\") pod \"2ede99ef-193f-4af6-9180-6b7557463c62\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.080077 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-config-data\") pod \"2ede99ef-193f-4af6-9180-6b7557463c62\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.080138 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-fernet-keys\") pod \"2ede99ef-193f-4af6-9180-6b7557463c62\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.080172 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-credential-keys\") pod \"2ede99ef-193f-4af6-9180-6b7557463c62\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.080238 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-scripts\") pod \"2ede99ef-193f-4af6-9180-6b7557463c62\" (UID: \"2ede99ef-193f-4af6-9180-6b7557463c62\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.085088 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ede99ef-193f-4af6-9180-6b7557463c62-kube-api-access-lhzc6" (OuterVolumeSpecName: "kube-api-access-lhzc6") pod "2ede99ef-193f-4af6-9180-6b7557463c62" (UID: "2ede99ef-193f-4af6-9180-6b7557463c62"). InnerVolumeSpecName "kube-api-access-lhzc6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.086566 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-scripts" (OuterVolumeSpecName: "scripts") pod "2ede99ef-193f-4af6-9180-6b7557463c62" (UID: "2ede99ef-193f-4af6-9180-6b7557463c62"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.086649 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "2ede99ef-193f-4af6-9180-6b7557463c62" (UID: "2ede99ef-193f-4af6-9180-6b7557463c62"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.086707 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "2ede99ef-193f-4af6-9180-6b7557463c62" (UID: "2ede99ef-193f-4af6-9180-6b7557463c62"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.110632 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2ede99ef-193f-4af6-9180-6b7557463c62" (UID: "2ede99ef-193f-4af6-9180-6b7557463c62"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.116153 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-config-data" (OuterVolumeSpecName: "config-data") pod "2ede99ef-193f-4af6-9180-6b7557463c62" (UID: "2ede99ef-193f-4af6-9180-6b7557463c62"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.181687 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.181722 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lhzc6\" (UniqueName: \"kubernetes.io/projected/2ede99ef-193f-4af6-9180-6b7557463c62-kube-api-access-lhzc6\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.181737 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.181746 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.181755 4967 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.181763 4967 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2ede99ef-193f-4af6-9180-6b7557463c62-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.526460 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.534237 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 15:56:12 crc kubenswrapper[4967]: E1121 15:56:12.623222 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified" Nov 21 15:56:12 crc kubenswrapper[4967]: E1121 15:56:12.623387 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-m5gzv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-96plk_openstack(e0fc2724-5c56-4db8-9a1e-4662761791c3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 15:56:12 crc kubenswrapper[4967]: E1121 15:56:12.624585 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-96plk" podUID="e0fc2724-5c56-4db8-9a1e-4662761791c3" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.694971 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"412d7d72-53e5-43e5-a85d-0fef4785dda1\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.695055 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4cdz\" (UniqueName: 
\"kubernetes.io/projected/412d7d72-53e5-43e5-a85d-0fef4785dda1-kube-api-access-w4cdz\") pod \"412d7d72-53e5-43e5-a85d-0fef4785dda1\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.695119 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.695148 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ad125b00-796b-4208-a2ad-8488ec4d2fd9-httpd-run\") pod \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.695174 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad125b00-796b-4208-a2ad-8488ec4d2fd9-logs\") pod \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.695243 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad125b00-796b-4208-a2ad-8488ec4d2fd9-config-data\") pod \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.695300 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad125b00-796b-4208-a2ad-8488ec4d2fd9-combined-ca-bundle\") pod \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.695657 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad125b00-796b-4208-a2ad-8488ec4d2fd9-logs" (OuterVolumeSpecName: "logs") pod "ad125b00-796b-4208-a2ad-8488ec4d2fd9" (UID: "ad125b00-796b-4208-a2ad-8488ec4d2fd9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.695763 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/412d7d72-53e5-43e5-a85d-0fef4785dda1-logs\") pod \"412d7d72-53e5-43e5-a85d-0fef4785dda1\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.695768 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad125b00-796b-4208-a2ad-8488ec4d2fd9-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "ad125b00-796b-4208-a2ad-8488ec4d2fd9" (UID: "ad125b00-796b-4208-a2ad-8488ec4d2fd9"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.695805 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/412d7d72-53e5-43e5-a85d-0fef4785dda1-httpd-run\") pod \"412d7d72-53e5-43e5-a85d-0fef4785dda1\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.695917 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad125b00-796b-4208-a2ad-8488ec4d2fd9-scripts\") pod \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.696027 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/412d7d72-53e5-43e5-a85d-0fef4785dda1-combined-ca-bundle\") pod \"412d7d72-53e5-43e5-a85d-0fef4785dda1\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.696103 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/412d7d72-53e5-43e5-a85d-0fef4785dda1-scripts\") pod \"412d7d72-53e5-43e5-a85d-0fef4785dda1\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.696123 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/412d7d72-53e5-43e5-a85d-0fef4785dda1-logs" (OuterVolumeSpecName: "logs") pod "412d7d72-53e5-43e5-a85d-0fef4785dda1" (UID: "412d7d72-53e5-43e5-a85d-0fef4785dda1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.696130 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/412d7d72-53e5-43e5-a85d-0fef4785dda1-config-data\") pod \"412d7d72-53e5-43e5-a85d-0fef4785dda1\" (UID: \"412d7d72-53e5-43e5-a85d-0fef4785dda1\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.696180 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9qx4x\" (UniqueName: \"kubernetes.io/projected/ad125b00-796b-4208-a2ad-8488ec4d2fd9-kube-api-access-9qx4x\") pod \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\" (UID: \"ad125b00-796b-4208-a2ad-8488ec4d2fd9\") " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.696572 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/412d7d72-53e5-43e5-a85d-0fef4785dda1-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "412d7d72-53e5-43e5-a85d-0fef4785dda1" (UID: "412d7d72-53e5-43e5-a85d-0fef4785dda1"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.697327 4967 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ad125b00-796b-4208-a2ad-8488ec4d2fd9-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.697355 4967 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad125b00-796b-4208-a2ad-8488ec4d2fd9-logs\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.697368 4967 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/412d7d72-53e5-43e5-a85d-0fef4785dda1-logs\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.698572 4967 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/412d7d72-53e5-43e5-a85d-0fef4785dda1-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.699125 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad125b00-796b-4208-a2ad-8488ec4d2fd9-scripts" (OuterVolumeSpecName: "scripts") pod "ad125b00-796b-4208-a2ad-8488ec4d2fd9" (UID: "ad125b00-796b-4208-a2ad-8488ec4d2fd9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.702370 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "ad125b00-796b-4208-a2ad-8488ec4d2fd9" (UID: "ad125b00-796b-4208-a2ad-8488ec4d2fd9"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.702428 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/412d7d72-53e5-43e5-a85d-0fef4785dda1-kube-api-access-w4cdz" (OuterVolumeSpecName: "kube-api-access-w4cdz") pod "412d7d72-53e5-43e5-a85d-0fef4785dda1" (UID: "412d7d72-53e5-43e5-a85d-0fef4785dda1"). InnerVolumeSpecName "kube-api-access-w4cdz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.702517 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/412d7d72-53e5-43e5-a85d-0fef4785dda1-scripts" (OuterVolumeSpecName: "scripts") pod "412d7d72-53e5-43e5-a85d-0fef4785dda1" (UID: "412d7d72-53e5-43e5-a85d-0fef4785dda1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.703083 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad125b00-796b-4208-a2ad-8488ec4d2fd9-kube-api-access-9qx4x" (OuterVolumeSpecName: "kube-api-access-9qx4x") pod "ad125b00-796b-4208-a2ad-8488ec4d2fd9" (UID: "ad125b00-796b-4208-a2ad-8488ec4d2fd9"). InnerVolumeSpecName "kube-api-access-9qx4x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.704433 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "412d7d72-53e5-43e5-a85d-0fef4785dda1" (UID: "412d7d72-53e5-43e5-a85d-0fef4785dda1"). 
InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.730684 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad125b00-796b-4208-a2ad-8488ec4d2fd9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ad125b00-796b-4208-a2ad-8488ec4d2fd9" (UID: "ad125b00-796b-4208-a2ad-8488ec4d2fd9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.736460 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/412d7d72-53e5-43e5-a85d-0fef4785dda1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "412d7d72-53e5-43e5-a85d-0fef4785dda1" (UID: "412d7d72-53e5-43e5-a85d-0fef4785dda1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.751885 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/412d7d72-53e5-43e5-a85d-0fef4785dda1-config-data" (OuterVolumeSpecName: "config-data") pod "412d7d72-53e5-43e5-a85d-0fef4785dda1" (UID: "412d7d72-53e5-43e5-a85d-0fef4785dda1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.756617 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad125b00-796b-4208-a2ad-8488ec4d2fd9-config-data" (OuterVolumeSpecName: "config-data") pod "ad125b00-796b-4208-a2ad-8488ec4d2fd9" (UID: "ad125b00-796b-4208-a2ad-8488ec4d2fd9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.801110 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad125b00-796b-4208-a2ad-8488ec4d2fd9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.801152 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad125b00-796b-4208-a2ad-8488ec4d2fd9-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.801166 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/412d7d72-53e5-43e5-a85d-0fef4785dda1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.801178 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/412d7d72-53e5-43e5-a85d-0fef4785dda1-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.801188 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/412d7d72-53e5-43e5-a85d-0fef4785dda1-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.801198 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9qx4x\" (UniqueName: \"kubernetes.io/projected/ad125b00-796b-4208-a2ad-8488ec4d2fd9-kube-api-access-9qx4x\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.801240 4967 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.801256 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4cdz\" (UniqueName: \"kubernetes.io/projected/412d7d72-53e5-43e5-a85d-0fef4785dda1-kube-api-access-w4cdz\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.801273 4967 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.801284 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad125b00-796b-4208-a2ad-8488ec4d2fd9-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.829773 4967 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.829887 4967 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.902937 4967 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.902978 4967 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.956138 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-f9fq7"] Nov 21 15:56:12 crc kubenswrapper[4967]: I1121 15:56:12.964851 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-f9fq7"] Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.044260 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ad125b00-796b-4208-a2ad-8488ec4d2fd9","Type":"ContainerDied","Data":"c4fb1ee1d2f2d04df192a33ffd9db2ab2b36f57234766c5caf71acb19a62c594"} Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.044293 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.044336 4967 scope.go:117] "RemoveContainer" containerID="2048e45ca359ca990315d60aad0e3858897f1fc024089e5ba27f529f81baa508" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.047655 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"412d7d72-53e5-43e5-a85d-0fef4785dda1","Type":"ContainerDied","Data":"5b4c2f83ba2a7e39675da85302fcd32511837f30f18460b59c0fb775d285f90a"} Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.047671 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: E1121 15:56:13.048840 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified\\\"\"" pod="openstack/heat-db-sync-96plk" podUID="e0fc2724-5c56-4db8-9a1e-4662761791c3" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.074713 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-z4nb7"] Nov 21 15:56:13 crc kubenswrapper[4967]: E1121 15:56:13.075278 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ede99ef-193f-4af6-9180-6b7557463c62" containerName="keystone-bootstrap" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.075333 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ede99ef-193f-4af6-9180-6b7557463c62" containerName="keystone-bootstrap" Nov 21 15:56:13 crc kubenswrapper[4967]: E1121 15:56:13.075358 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1244a28a-d29b-4cfd-b59a-b8993162ff33" containerName="init" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.075367 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="1244a28a-d29b-4cfd-b59a-b8993162ff33" containerName="init" Nov 21 15:56:13 crc kubenswrapper[4967]: E1121 15:56:13.075385 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="412d7d72-53e5-43e5-a85d-0fef4785dda1" containerName="glance-log" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.075395 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="412d7d72-53e5-43e5-a85d-0fef4785dda1" containerName="glance-log" Nov 21 15:56:13 crc kubenswrapper[4967]: E1121 15:56:13.075408 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="412d7d72-53e5-43e5-a85d-0fef4785dda1" containerName="glance-httpd" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 
15:56:13.075415 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="412d7d72-53e5-43e5-a85d-0fef4785dda1" containerName="glance-httpd" Nov 21 15:56:13 crc kubenswrapper[4967]: E1121 15:56:13.075427 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad125b00-796b-4208-a2ad-8488ec4d2fd9" containerName="glance-log" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.075434 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad125b00-796b-4208-a2ad-8488ec4d2fd9" containerName="glance-log" Nov 21 15:56:13 crc kubenswrapper[4967]: E1121 15:56:13.075456 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad125b00-796b-4208-a2ad-8488ec4d2fd9" containerName="glance-httpd" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.075463 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad125b00-796b-4208-a2ad-8488ec4d2fd9" containerName="glance-httpd" Nov 21 15:56:13 crc kubenswrapper[4967]: E1121 15:56:13.075501 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fae67e7-bb2e-4713-bb58-dbdc308377db" containerName="init" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.075510 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fae67e7-bb2e-4713-bb58-dbdc308377db" containerName="init" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.075741 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ede99ef-193f-4af6-9180-6b7557463c62" containerName="keystone-bootstrap" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.075767 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="9fae67e7-bb2e-4713-bb58-dbdc308377db" containerName="init" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.075778 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad125b00-796b-4208-a2ad-8488ec4d2fd9" containerName="glance-httpd" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.075785 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="412d7d72-53e5-43e5-a85d-0fef4785dda1" containerName="glance-httpd" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.075792 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="1244a28a-d29b-4cfd-b59a-b8993162ff33" containerName="init" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.075806 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad125b00-796b-4208-a2ad-8488ec4d2fd9" containerName="glance-log" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.075820 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="412d7d72-53e5-43e5-a85d-0fef4785dda1" containerName="glance-log" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.076833 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.080163 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.080735 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.080873 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.081019 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-lbgzv" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.084497 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.087278 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-z4nb7"] Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.118916 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.143643 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.167358 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.172791 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.174596 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.174913 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-2sd9k" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.175430 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.179214 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.182216 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.184242 4967 scope.go:117] "RemoveContainer" containerID="ecc74cb8693230dc26656497af421cafabdd6fd870605a42100a83b90ab895a7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.208751 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7hzc\" (UniqueName: \"kubernetes.io/projected/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-kube-api-access-q7hzc\") pod \"keystone-bootstrap-z4nb7\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.208868 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-scripts\") pod \"keystone-bootstrap-z4nb7\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " pod="openstack/keystone-bootstrap-z4nb7" Nov 21 
15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.208946 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-combined-ca-bundle\") pod \"keystone-bootstrap-z4nb7\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.209143 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-credential-keys\") pod \"keystone-bootstrap-z4nb7\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.209181 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-config-data\") pod \"keystone-bootstrap-z4nb7\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.209217 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-fernet-keys\") pod \"keystone-bootstrap-z4nb7\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.214534 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 15:56:13 crc kubenswrapper[4967]: E1121 15:56:13.228954 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 21 15:56:13 crc kubenswrapper[4967]: E1121 15:56:13.229117 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db 
upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mlpds,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-r85k5_openstack(dd1dc42f-c657-4dd3-9ca3-e8bc865d6280): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 15:56:13 crc kubenswrapper[4967]: E1121 15:56:13.230452 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-r85k5" podUID="dd1dc42f-c657-4dd3-9ca3-e8bc865d6280" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.230535 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.241774 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.243827 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.246652 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.246664 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.253502 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.311558 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-fernet-keys\") pod \"keystone-bootstrap-z4nb7\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.311613 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.311677 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7hzc\" (UniqueName: \"kubernetes.io/projected/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-kube-api-access-q7hzc\") pod \"keystone-bootstrap-z4nb7\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.311714 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.311788 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-scripts\") pod \"keystone-bootstrap-z4nb7\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.311835 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-combined-ca-bundle\") pod \"keystone-bootstrap-z4nb7\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.311874 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hk86t\" (UniqueName: \"kubernetes.io/projected/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-kube-api-access-hk86t\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.312000 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.312077 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-logs\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.312112 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.312137 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.312164 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.312253 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-credential-keys\") pod \"keystone-bootstrap-z4nb7\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.312303 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-config-data\") pod \"keystone-bootstrap-z4nb7\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.317231 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-config-data\") pod \"keystone-bootstrap-z4nb7\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.317679 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-credential-keys\") pod \"keystone-bootstrap-z4nb7\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.317908 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-combined-ca-bundle\") pod \"keystone-bootstrap-z4nb7\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.318533 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-fernet-keys\") pod \"keystone-bootstrap-z4nb7\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.321390 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-scripts\") pod \"keystone-bootstrap-z4nb7\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.334569 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7hzc\" (UniqueName: \"kubernetes.io/projected/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-kube-api-access-q7hzc\") pod \"keystone-bootstrap-z4nb7\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.355683 4967 scope.go:117] "RemoveContainer" containerID="522da33baa379acfe695485250ed4e2e3974128641163970b942d3b0ae6844df" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.404207 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.415096 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-config-data\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.415246 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.415288 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-scripts\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.415374 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.415522 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") device mount path 
\"/mnt/openstack/pv08\"" pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.416007 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.430067 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6tls\" (UniqueName: \"kubernetes.io/projected/092d2168-5e3a-4967-a0b7-2f4b85a90487-kube-api-access-n6tls\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.430255 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hk86t\" (UniqueName: \"kubernetes.io/projected/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-kube-api-access-hk86t\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.430377 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.430429 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/092d2168-5e3a-4967-a0b7-2f4b85a90487-logs\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.430465 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/092d2168-5e3a-4967-a0b7-2f4b85a90487-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.430534 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.430642 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-logs\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.430705 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " 
pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.430735 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.430810 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.430859 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.430946 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.434039 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-logs\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.439833 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.451567 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hk86t\" (UniqueName: \"kubernetes.io/projected/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-kube-api-access-hk86t\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.465737 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.477439 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.482859 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.487112 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.522663 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.533810 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/092d2168-5e3a-4967-a0b7-2f4b85a90487-logs\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.533873 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/092d2168-5e3a-4967-a0b7-2f4b85a90487-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.533928 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.534004 4967 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.534042 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.534102 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-config-data\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.534185 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-scripts\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.534277 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n6tls\" (UniqueName: \"kubernetes.io/projected/092d2168-5e3a-4967-a0b7-2f4b85a90487-kube-api-access-n6tls\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.534440 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.534761 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/092d2168-5e3a-4967-a0b7-2f4b85a90487-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.534787 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/092d2168-5e3a-4967-a0b7-2f4b85a90487-logs\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.545027 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.548039 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-config-data\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.548293 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-scripts\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.550074 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.554688 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6tls\" (UniqueName: \"kubernetes.io/projected/092d2168-5e3a-4967-a0b7-2f4b85a90487-kube-api-access-n6tls\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.596398 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " pod="openstack/glance-default-external-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.635932 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4svb\" (UniqueName: \"kubernetes.io/projected/3678c17b-6120-4063-b84d-7cb362b46b62-kube-api-access-s4svb\") pod \"3678c17b-6120-4063-b84d-7cb362b46b62\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.636440 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-dns-svc\") pod \"3678c17b-6120-4063-b84d-7cb362b46b62\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.636588 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-config\") pod \"3678c17b-6120-4063-b84d-7cb362b46b62\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.636654 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-ovsdbserver-sb\") pod \"3678c17b-6120-4063-b84d-7cb362b46b62\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.636858 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-ovsdbserver-nb\") pod \"3678c17b-6120-4063-b84d-7cb362b46b62\" (UID: \"3678c17b-6120-4063-b84d-7cb362b46b62\") " Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.641788 4967 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3678c17b-6120-4063-b84d-7cb362b46b62-kube-api-access-s4svb" (OuterVolumeSpecName: "kube-api-access-s4svb") pod "3678c17b-6120-4063-b84d-7cb362b46b62" (UID: "3678c17b-6120-4063-b84d-7cb362b46b62"). InnerVolumeSpecName "kube-api-access-s4svb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.692769 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-config" (OuterVolumeSpecName: "config") pod "3678c17b-6120-4063-b84d-7cb362b46b62" (UID: "3678c17b-6120-4063-b84d-7cb362b46b62"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.694877 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3678c17b-6120-4063-b84d-7cb362b46b62" (UID: "3678c17b-6120-4063-b84d-7cb362b46b62"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.695032 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3678c17b-6120-4063-b84d-7cb362b46b62" (UID: "3678c17b-6120-4063-b84d-7cb362b46b62"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.697829 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3678c17b-6120-4063-b84d-7cb362b46b62" (UID: "3678c17b-6120-4063-b84d-7cb362b46b62"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.741109 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.741432 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4svb\" (UniqueName: \"kubernetes.io/projected/3678c17b-6120-4063-b84d-7cb362b46b62-kube-api-access-s4svb\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.741498 4967 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.741551 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.741614 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3678c17b-6120-4063-b84d-7cb362b46b62-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.805708 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 15:56:13 crc kubenswrapper[4967]: I1121 15:56:13.815438 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 15:56:14 crc kubenswrapper[4967]: I1121 15:56:14.061862 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0b6e974-ebcc-4421-879e-d711bd689855","Type":"ContainerStarted","Data":"4a6099baad164d23e4b74d57e9a05aea89e6b80a4d326a9d329049a33a1b0610"} Nov 21 15:56:14 crc kubenswrapper[4967]: I1121 15:56:14.064162 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" event={"ID":"3678c17b-6120-4063-b84d-7cb362b46b62","Type":"ContainerDied","Data":"481cc457850f09de5f6733622295e785de44db3a183cba3a7624a5d78c5052a1"} Nov 21 15:56:14 crc kubenswrapper[4967]: I1121 15:56:14.064272 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-rdtt2" Nov 21 15:56:14 crc kubenswrapper[4967]: I1121 15:56:14.072040 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2","Type":"ContainerStarted","Data":"89623f225b65a94cc2c734cb46ef1ee33b5f68f340ce8418b1820ae81ea3b8da"} Nov 21 15:56:14 crc kubenswrapper[4967]: I1121 15:56:14.118395 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-rdtt2"] Nov 21 15:56:14 crc kubenswrapper[4967]: I1121 15:56:14.130888 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-rdtt2"] Nov 21 15:56:14 crc kubenswrapper[4967]: I1121 15:56:14.330000 4967 scope.go:117] "RemoveContainer" containerID="e18249eeeb2c1b5c3d93d87f40580ca99ad5ae82a59aac306906185d19fed559" Nov 21 15:56:14 crc kubenswrapper[4967]: E1121 15:56:14.330164 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-r85k5" podUID="dd1dc42f-c657-4dd3-9ca3-e8bc865d6280" Nov 21 15:56:14 crc kubenswrapper[4967]: I1121 15:56:14.379993 4967 scope.go:117] "RemoveContainer" containerID="d00e911b5f610723bdfe86e653cc8ee024c662cb4968b26abf6b9606993f8579" Nov 21 15:56:14 crc kubenswrapper[4967]: I1121 15:56:14.556688 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ede99ef-193f-4af6-9180-6b7557463c62" path="/var/lib/kubelet/pods/2ede99ef-193f-4af6-9180-6b7557463c62/volumes" Nov 21 15:56:14 crc kubenswrapper[4967]: I1121 15:56:14.558042 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3678c17b-6120-4063-b84d-7cb362b46b62" path="/var/lib/kubelet/pods/3678c17b-6120-4063-b84d-7cb362b46b62/volumes" Nov 21 15:56:14 crc kubenswrapper[4967]: I1121 15:56:14.558844 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="412d7d72-53e5-43e5-a85d-0fef4785dda1" path="/var/lib/kubelet/pods/412d7d72-53e5-43e5-a85d-0fef4785dda1/volumes" Nov 21 15:56:14 crc kubenswrapper[4967]: I1121 15:56:14.560479 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad125b00-796b-4208-a2ad-8488ec4d2fd9" path="/var/lib/kubelet/pods/ad125b00-796b-4208-a2ad-8488ec4d2fd9/volumes" Nov 21 15:56:14 crc kubenswrapper[4967]: I1121 15:56:14.646238 4967 scope.go:117] "RemoveContainer" containerID="d24740e47a04bfc4aabd110bb8e8aaf83d65fe875b69fb7d6a58f4754f1f8953" Nov 21 15:56:14 crc kubenswrapper[4967]: I1121 15:56:14.855008 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-z4nb7"] Nov 21 15:56:14 crc kubenswrapper[4967]: W1121 15:56:14.859205 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0acc480b_ec94_4ce1_af6f_d20f9e5f45c2.slice/crio-6bdd0ae94b8fd276aec44a08056be6a5d600211aaa86805d3348fedba2397260 WatchSource:0}: Error finding container 6bdd0ae94b8fd276aec44a08056be6a5d600211aaa86805d3348fedba2397260: Status 404 returned error can't find the container with id 6bdd0ae94b8fd276aec44a08056be6a5d600211aaa86805d3348fedba2397260 Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.061060 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 
15:56:15.097929 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"4bf5cb6a-c8f8-43c3-b546-282bfd3244e2","Type":"ContainerStarted","Data":"29a0304c16acc1ad9105fd5d2fdee17914df8adea7f7004c6e30b17721a02b4e"} Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.106025 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-z4nb7" event={"ID":"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2","Type":"ContainerStarted","Data":"6bdd0ae94b8fd276aec44a08056be6a5d600211aaa86805d3348fedba2397260"} Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.109433 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-zb9np" event={"ID":"6d0b548b-65d2-496b-a8f0-5556b5e9760e","Type":"ContainerStarted","Data":"53109c647d135c73ad4240c502d8f1fec68a0806f504bb19180addbd8c74564d"} Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.146953 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=84.158544449 podStartE2EDuration="1m34.146934753s" podCreationTimestamp="2025-11-21 15:54:41 +0000 UTC" firstStartedPulling="2025-11-21 15:55:17.807216614 +0000 UTC m=+1206.065737632" lastFinishedPulling="2025-11-21 15:55:27.795606928 +0000 UTC m=+1216.054127936" observedRunningTime="2025-11-21 15:56:15.140113916 +0000 UTC m=+1263.398634924" watchObservedRunningTime="2025-11-21 15:56:15.146934753 +0000 UTC m=+1263.405455761" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.170135 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.172319 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-zb9np" podStartSLOduration=5.162609976 podStartE2EDuration="50.172296446s" podCreationTimestamp="2025-11-21 15:55:25 +0000 UTC" firstStartedPulling="2025-11-21 15:55:29.380802385 +0000 UTC m=+1217.639323393" lastFinishedPulling="2025-11-21 15:56:14.390488855 +0000 UTC m=+1262.649009863" observedRunningTime="2025-11-21 15:56:15.164155871 +0000 UTC m=+1263.422676899" watchObservedRunningTime="2025-11-21 15:56:15.172296446 +0000 UTC m=+1263.430817444" Nov 21 15:56:15 crc kubenswrapper[4967]: W1121 15:56:15.332748 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod092d2168_5e3a_4967_a0b7_2f4b85a90487.slice/crio-a274682d609fa8944c0fc3a36919e4b194c6a57944325e0ff6b3bf96ea73d497 WatchSource:0}: Error finding container a274682d609fa8944c0fc3a36919e4b194c6a57944325e0ff6b3bf96ea73d497: Status 404 returned error can't find the container with id a274682d609fa8944c0fc3a36919e4b194c6a57944325e0ff6b3bf96ea73d497 Nov 21 15:56:15 crc kubenswrapper[4967]: W1121 15:56:15.344478 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7c73f5a0_e6d9_439d_be71_aa94fbdc6c4f.slice/crio-9b1bd8e379286f1052ec9cfaf2e14f8f29b3433246e42c0fdd54aed222356459 WatchSource:0}: Error finding container 9b1bd8e379286f1052ec9cfaf2e14f8f29b3433246e42c0fdd54aed222356459: Status 404 returned error can't find the container with id 9b1bd8e379286f1052ec9cfaf2e14f8f29b3433246e42c0fdd54aed222356459 Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.437719 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-blfrj"] Nov 21 15:56:15 crc 
kubenswrapper[4967]: E1121 15:56:15.438181 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3678c17b-6120-4063-b84d-7cb362b46b62" containerName="init" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.438200 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="3678c17b-6120-4063-b84d-7cb362b46b62" containerName="init" Nov 21 15:56:15 crc kubenswrapper[4967]: E1121 15:56:15.438249 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3678c17b-6120-4063-b84d-7cb362b46b62" containerName="dnsmasq-dns" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.438257 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="3678c17b-6120-4063-b84d-7cb362b46b62" containerName="dnsmasq-dns" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.438666 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="3678c17b-6120-4063-b84d-7cb362b46b62" containerName="dnsmasq-dns" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.440194 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.443388 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.452271 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-blfrj"] Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.583400 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-blfrj\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.583516 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-blfrj\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.583557 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-blfrj\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.583600 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-config\") pod \"dnsmasq-dns-56df8fb6b7-blfrj\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.583661 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mwj6\" (UniqueName: \"kubernetes.io/projected/036b7ea7-f134-4986-a4ae-ca8725f40ee6-kube-api-access-5mwj6\") pod \"dnsmasq-dns-56df8fb6b7-blfrj\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 
15:56:15.583691 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-blfrj\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.688294 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mwj6\" (UniqueName: \"kubernetes.io/projected/036b7ea7-f134-4986-a4ae-ca8725f40ee6-kube-api-access-5mwj6\") pod \"dnsmasq-dns-56df8fb6b7-blfrj\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.688415 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-blfrj\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.688493 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-blfrj\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.688572 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-blfrj\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.688658 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-blfrj\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.688738 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-config\") pod \"dnsmasq-dns-56df8fb6b7-blfrj\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.689861 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-config\") pod \"dnsmasq-dns-56df8fb6b7-blfrj\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.690576 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-blfrj\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.690801 4967 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-blfrj\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.690928 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-blfrj\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.691087 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-blfrj\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.712929 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mwj6\" (UniqueName: \"kubernetes.io/projected/036b7ea7-f134-4986-a4ae-ca8725f40ee6-kube-api-access-5mwj6\") pod \"dnsmasq-dns-56df8fb6b7-blfrj\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:15 crc kubenswrapper[4967]: I1121 15:56:15.776340 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:16 crc kubenswrapper[4967]: I1121 15:56:16.129818 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f","Type":"ContainerStarted","Data":"9b1bd8e379286f1052ec9cfaf2e14f8f29b3433246e42c0fdd54aed222356459"} Nov 21 15:56:16 crc kubenswrapper[4967]: I1121 15:56:16.133280 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-z4nb7" event={"ID":"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2","Type":"ContainerStarted","Data":"f0b7e826cff02dfc47860f3f0c7f87b7911d250ed06464577aaf8ef203efb96a"} Nov 21 15:56:16 crc kubenswrapper[4967]: I1121 15:56:16.137433 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"092d2168-5e3a-4967-a0b7-2f4b85a90487","Type":"ContainerStarted","Data":"a274682d609fa8944c0fc3a36919e4b194c6a57944325e0ff6b3bf96ea73d497"} Nov 21 15:56:16 crc kubenswrapper[4967]: I1121 15:56:16.151519 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0b6e974-ebcc-4421-879e-d711bd689855","Type":"ContainerStarted","Data":"78177d37dda4a777255af8266702bdde12386a0ca9f3803603cfcaf6a78fe6e1"} Nov 21 15:56:16 crc kubenswrapper[4967]: I1121 15:56:16.162269 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-z4nb7" podStartSLOduration=3.162250668 podStartE2EDuration="3.162250668s" podCreationTimestamp="2025-11-21 15:56:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:56:16.153425873 +0000 UTC m=+1264.411946881" watchObservedRunningTime="2025-11-21 15:56:16.162250668 +0000 UTC m=+1264.420771676" Nov 21 15:56:16 crc kubenswrapper[4967]: I1121 15:56:16.315305 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/dnsmasq-dns-56df8fb6b7-blfrj"] Nov 21 15:56:16 crc kubenswrapper[4967]: W1121 15:56:16.320567 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod036b7ea7_f134_4986_a4ae_ca8725f40ee6.slice/crio-484f4c305cf608eb1f21d145bf5412abb972e5d29fb680b2945afe7c459bed4f WatchSource:0}: Error finding container 484f4c305cf608eb1f21d145bf5412abb972e5d29fb680b2945afe7c459bed4f: Status 404 returned error can't find the container with id 484f4c305cf608eb1f21d145bf5412abb972e5d29fb680b2945afe7c459bed4f Nov 21 15:56:17 crc kubenswrapper[4967]: I1121 15:56:17.173825 4967 generic.go:334] "Generic (PLEG): container finished" podID="036b7ea7-f134-4986-a4ae-ca8725f40ee6" containerID="78d254d881303628bb2863105ae549ca19b43743bcbd3024526af923092b9aed" exitCode=0 Nov 21 15:56:17 crc kubenswrapper[4967]: I1121 15:56:17.173945 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" event={"ID":"036b7ea7-f134-4986-a4ae-ca8725f40ee6","Type":"ContainerDied","Data":"78d254d881303628bb2863105ae549ca19b43743bcbd3024526af923092b9aed"} Nov 21 15:56:17 crc kubenswrapper[4967]: I1121 15:56:17.174032 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" event={"ID":"036b7ea7-f134-4986-a4ae-ca8725f40ee6","Type":"ContainerStarted","Data":"484f4c305cf608eb1f21d145bf5412abb972e5d29fb680b2945afe7c459bed4f"} Nov 21 15:56:17 crc kubenswrapper[4967]: I1121 15:56:17.177889 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f","Type":"ContainerStarted","Data":"aaf7de92700f13b43c6dda63553c636e3e8a02bb1b2dc4cef73764f54d4ba038"} Nov 21 15:56:17 crc kubenswrapper[4967]: I1121 15:56:17.177935 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f","Type":"ContainerStarted","Data":"b2a9008ab5ab53fda96ffa0e2ded04125689e319f061812d7e5bd689a69dbf4a"} Nov 21 15:56:17 crc kubenswrapper[4967]: I1121 15:56:17.181728 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"092d2168-5e3a-4967-a0b7-2f4b85a90487","Type":"ContainerStarted","Data":"874e5b6d9db34d44f43a3bd2057069b1e10198bf5cf524250d237bf0ce9ce8d7"} Nov 21 15:56:17 crc kubenswrapper[4967]: I1121 15:56:17.181757 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"092d2168-5e3a-4967-a0b7-2f4b85a90487","Type":"ContainerStarted","Data":"5b9bca81bb29bbba4965712b67772e9516b0de09b6b7075258575b7c8f627668"} Nov 21 15:56:17 crc kubenswrapper[4967]: I1121 15:56:17.248009 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.247981326 podStartE2EDuration="4.247981326s" podCreationTimestamp="2025-11-21 15:56:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:56:17.242009294 +0000 UTC m=+1265.500530322" watchObservedRunningTime="2025-11-21 15:56:17.247981326 +0000 UTC m=+1265.506502334" Nov 21 15:56:17 crc kubenswrapper[4967]: I1121 15:56:17.301616 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.301596345 
podStartE2EDuration="4.301596345s" podCreationTimestamp="2025-11-21 15:56:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:56:17.284000247 +0000 UTC m=+1265.542521265" watchObservedRunningTime="2025-11-21 15:56:17.301596345 +0000 UTC m=+1265.560117353" Nov 21 15:56:18 crc kubenswrapper[4967]: I1121 15:56:18.194693 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" event={"ID":"036b7ea7-f134-4986-a4ae-ca8725f40ee6","Type":"ContainerStarted","Data":"4b2594431cca1d7af0cbb78911696cae035b16c57828192ffb2f4c4c8accf0f0"} Nov 21 15:56:19 crc kubenswrapper[4967]: I1121 15:56:19.202365 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:23 crc kubenswrapper[4967]: I1121 15:56:23.248774 4967 generic.go:334] "Generic (PLEG): container finished" podID="0acc480b-ec94-4ce1-af6f-d20f9e5f45c2" containerID="f0b7e826cff02dfc47860f3f0c7f87b7911d250ed06464577aaf8ef203efb96a" exitCode=0 Nov 21 15:56:23 crc kubenswrapper[4967]: I1121 15:56:23.249196 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-z4nb7" event={"ID":"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2","Type":"ContainerDied","Data":"f0b7e826cff02dfc47860f3f0c7f87b7911d250ed06464577aaf8ef203efb96a"} Nov 21 15:56:23 crc kubenswrapper[4967]: I1121 15:56:23.274417 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" podStartSLOduration=8.274401926 podStartE2EDuration="8.274401926s" podCreationTimestamp="2025-11-21 15:56:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:56:18.21304746 +0000 UTC m=+1266.471568478" watchObservedRunningTime="2025-11-21 15:56:23.274401926 +0000 UTC m=+1271.532922934" Nov 21 15:56:23 crc kubenswrapper[4967]: I1121 15:56:23.806586 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 21 15:56:23 crc kubenswrapper[4967]: I1121 15:56:23.806634 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 21 15:56:23 crc kubenswrapper[4967]: I1121 15:56:23.817260 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 21 15:56:23 crc kubenswrapper[4967]: I1121 15:56:23.817361 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 21 15:56:23 crc kubenswrapper[4967]: I1121 15:56:23.918013 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 21 15:56:23 crc kubenswrapper[4967]: I1121 15:56:23.918567 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 21 15:56:23 crc kubenswrapper[4967]: I1121 15:56:23.918653 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 21 15:56:23 crc kubenswrapper[4967]: I1121 15:56:23.918700 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.260913 4967 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.260959 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.260971 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.260982 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.642298 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.712681 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-config-data\") pod \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.712787 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-combined-ca-bundle\") pod \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.712913 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-scripts\") pod \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.712935 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-fernet-keys\") pod \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.712959 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-credential-keys\") pod \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.713072 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7hzc\" (UniqueName: \"kubernetes.io/projected/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-kube-api-access-q7hzc\") pod \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\" (UID: \"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2\") " Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.719338 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-kube-api-access-q7hzc" (OuterVolumeSpecName: "kube-api-access-q7hzc") pod "0acc480b-ec94-4ce1-af6f-d20f9e5f45c2" (UID: "0acc480b-ec94-4ce1-af6f-d20f9e5f45c2"). InnerVolumeSpecName "kube-api-access-q7hzc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.719766 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-scripts" (OuterVolumeSpecName: "scripts") pod "0acc480b-ec94-4ce1-af6f-d20f9e5f45c2" (UID: "0acc480b-ec94-4ce1-af6f-d20f9e5f45c2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.719789 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "0acc480b-ec94-4ce1-af6f-d20f9e5f45c2" (UID: "0acc480b-ec94-4ce1-af6f-d20f9e5f45c2"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.721985 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "0acc480b-ec94-4ce1-af6f-d20f9e5f45c2" (UID: "0acc480b-ec94-4ce1-af6f-d20f9e5f45c2"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.744744 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-config-data" (OuterVolumeSpecName: "config-data") pod "0acc480b-ec94-4ce1-af6f-d20f9e5f45c2" (UID: "0acc480b-ec94-4ce1-af6f-d20f9e5f45c2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.747416 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0acc480b-ec94-4ce1-af6f-d20f9e5f45c2" (UID: "0acc480b-ec94-4ce1-af6f-d20f9e5f45c2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.816283 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.816337 4967 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.816347 4967 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.816358 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7hzc\" (UniqueName: \"kubernetes.io/projected/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-kube-api-access-q7hzc\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.816368 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:24 crc kubenswrapper[4967]: I1121 15:56:24.816376 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.274285 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-z4nb7" event={"ID":"0acc480b-ec94-4ce1-af6f-d20f9e5f45c2","Type":"ContainerDied","Data":"6bdd0ae94b8fd276aec44a08056be6a5d600211aaa86805d3348fedba2397260"} Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.274390 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-z4nb7" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.274403 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6bdd0ae94b8fd276aec44a08056be6a5d600211aaa86805d3348fedba2397260" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.365108 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-7c9cd95b4b-sqmvx"] Nov 21 15:56:25 crc kubenswrapper[4967]: E1121 15:56:25.366444 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0acc480b-ec94-4ce1-af6f-d20f9e5f45c2" containerName="keystone-bootstrap" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.366490 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0acc480b-ec94-4ce1-af6f-d20f9e5f45c2" containerName="keystone-bootstrap" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.367112 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="0acc480b-ec94-4ce1-af6f-d20f9e5f45c2" containerName="keystone-bootstrap" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.368712 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.372437 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.372624 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.372684 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.372717 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.372760 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-lbgzv" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.372821 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.392380 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7c9cd95b4b-sqmvx"] Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.430214 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-fernet-keys\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.430275 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dthvw\" (UniqueName: \"kubernetes.io/projected/cfda2b1a-4625-4150-9b58-c958f677ceb6-kube-api-access-dthvw\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.430380 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-credential-keys\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.430461 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-scripts\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.430543 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-public-tls-certs\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.430579 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-config-data\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: 
\"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.430608 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-internal-tls-certs\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.430630 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-combined-ca-bundle\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.532369 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-public-tls-certs\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.532447 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-config-data\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.532498 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-internal-tls-certs\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.532523 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-combined-ca-bundle\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.532620 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-fernet-keys\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.532644 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dthvw\" (UniqueName: \"kubernetes.io/projected/cfda2b1a-4625-4150-9b58-c958f677ceb6-kube-api-access-dthvw\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.532685 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-credential-keys\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " 
pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.532770 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-scripts\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.538728 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-internal-tls-certs\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.539448 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-public-tls-certs\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.540139 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-combined-ca-bundle\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.540626 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-scripts\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.540770 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-config-data\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.541226 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-fernet-keys\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.541482 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/cfda2b1a-4625-4150-9b58-c958f677ceb6-credential-keys\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.561673 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dthvw\" (UniqueName: \"kubernetes.io/projected/cfda2b1a-4625-4150-9b58-c958f677ceb6-kube-api-access-dthvw\") pod \"keystone-7c9cd95b4b-sqmvx\" (UID: \"cfda2b1a-4625-4150-9b58-c958f677ceb6\") " pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.687255 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.778496 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.855575 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56798b757f-7zmcj"] Nov 21 15:56:25 crc kubenswrapper[4967]: I1121 15:56:25.855928 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56798b757f-7zmcj" podUID="2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99" containerName="dnsmasq-dns" containerID="cri-o://3bfc8fac75006f89d9cb64fef1803839d9d486d0b5248f9b30304a8f973d2c63" gracePeriod=10 Nov 21 15:56:26 crc kubenswrapper[4967]: I1121 15:56:26.289881 4967 generic.go:334] "Generic (PLEG): container finished" podID="2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99" containerID="3bfc8fac75006f89d9cb64fef1803839d9d486d0b5248f9b30304a8f973d2c63" exitCode=0 Nov 21 15:56:26 crc kubenswrapper[4967]: I1121 15:56:26.290209 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56798b757f-7zmcj" event={"ID":"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99","Type":"ContainerDied","Data":"3bfc8fac75006f89d9cb64fef1803839d9d486d0b5248f9b30304a8f973d2c63"} Nov 21 15:56:26 crc kubenswrapper[4967]: I1121 15:56:26.292740 4967 generic.go:334] "Generic (PLEG): container finished" podID="6d0b548b-65d2-496b-a8f0-5556b5e9760e" containerID="53109c647d135c73ad4240c502d8f1fec68a0806f504bb19180addbd8c74564d" exitCode=0 Nov 21 15:56:26 crc kubenswrapper[4967]: I1121 15:56:26.292804 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-zb9np" event={"ID":"6d0b548b-65d2-496b-a8f0-5556b5e9760e","Type":"ContainerDied","Data":"53109c647d135c73ad4240c502d8f1fec68a0806f504bb19180addbd8c74564d"} Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.661645 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-zb9np" Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.703904 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d0b548b-65d2-496b-a8f0-5556b5e9760e-config-data\") pod \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.704090 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fgfw5\" (UniqueName: \"kubernetes.io/projected/6d0b548b-65d2-496b-a8f0-5556b5e9760e-kube-api-access-fgfw5\") pod \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.704254 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d0b548b-65d2-496b-a8f0-5556b5e9760e-logs\") pod \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.704382 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d0b548b-65d2-496b-a8f0-5556b5e9760e-combined-ca-bundle\") pod \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.704407 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d0b548b-65d2-496b-a8f0-5556b5e9760e-scripts\") pod \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\" (UID: \"6d0b548b-65d2-496b-a8f0-5556b5e9760e\") " Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.704632 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d0b548b-65d2-496b-a8f0-5556b5e9760e-logs" (OuterVolumeSpecName: "logs") pod "6d0b548b-65d2-496b-a8f0-5556b5e9760e" (UID: "6d0b548b-65d2-496b-a8f0-5556b5e9760e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.705146 4967 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6d0b548b-65d2-496b-a8f0-5556b5e9760e-logs\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.711294 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d0b548b-65d2-496b-a8f0-5556b5e9760e-kube-api-access-fgfw5" (OuterVolumeSpecName: "kube-api-access-fgfw5") pod "6d0b548b-65d2-496b-a8f0-5556b5e9760e" (UID: "6d0b548b-65d2-496b-a8f0-5556b5e9760e"). InnerVolumeSpecName "kube-api-access-fgfw5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.725207 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d0b548b-65d2-496b-a8f0-5556b5e9760e-scripts" (OuterVolumeSpecName: "scripts") pod "6d0b548b-65d2-496b-a8f0-5556b5e9760e" (UID: "6d0b548b-65d2-496b-a8f0-5556b5e9760e"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.737207 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d0b548b-65d2-496b-a8f0-5556b5e9760e-config-data" (OuterVolumeSpecName: "config-data") pod "6d0b548b-65d2-496b-a8f0-5556b5e9760e" (UID: "6d0b548b-65d2-496b-a8f0-5556b5e9760e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.738888 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d0b548b-65d2-496b-a8f0-5556b5e9760e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6d0b548b-65d2-496b-a8f0-5556b5e9760e" (UID: "6d0b548b-65d2-496b-a8f0-5556b5e9760e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.807800 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fgfw5\" (UniqueName: \"kubernetes.io/projected/6d0b548b-65d2-496b-a8f0-5556b5e9760e-kube-api-access-fgfw5\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.807986 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d0b548b-65d2-496b-a8f0-5556b5e9760e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.808053 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d0b548b-65d2-496b-a8f0-5556b5e9760e-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.808107 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d0b548b-65d2-496b-a8f0-5556b5e9760e-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.960377 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.960742 4967 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.970322 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:56:28 crc kubenswrapper[4967]: I1121 15:56:28.987516 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.021883 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.021975 4967 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.042114 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.118783 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbfkv\" (UniqueName: \"kubernetes.io/projected/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-kube-api-access-bbfkv\") pod \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.118835 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-dns-svc\") pod \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.118864 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-ovsdbserver-sb\") pod \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.118958 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-config\") pod \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.119036 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-ovsdbserver-nb\") pod \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\" (UID: \"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99\") " Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.178103 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-kube-api-access-bbfkv" (OuterVolumeSpecName: "kube-api-access-bbfkv") pod "2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99" (UID: "2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99"). InnerVolumeSpecName "kube-api-access-bbfkv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.183294 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7c9cd95b4b-sqmvx"] Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.232371 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbfkv\" (UniqueName: \"kubernetes.io/projected/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-kube-api-access-bbfkv\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.362627 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99" (UID: "2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.374674 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-config" (OuterVolumeSpecName: "config") pod "2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99" (UID: "2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.380546 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7c9cd95b4b-sqmvx" event={"ID":"cfda2b1a-4625-4150-9b58-c958f677ceb6","Type":"ContainerStarted","Data":"b6e8305c599774b44396fdb33e51babd2cc746b32092202b42ff36a16c905ae4"} Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.387395 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99" (UID: "2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.389050 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0b6e974-ebcc-4421-879e-d711bd689855","Type":"ContainerStarted","Data":"85ae66797cfcf9e867815a993b11fc9b91d45bafecfafd6336cc2fbe8be173f8"} Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.391239 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-zb9np" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.392974 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-zb9np" event={"ID":"6d0b548b-65d2-496b-a8f0-5556b5e9760e","Type":"ContainerDied","Data":"88549493ac0fdb7848faaa622469a99b465e3a9f77d4470edef43ab83a865848"} Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.393024 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="88549493ac0fdb7848faaa622469a99b465e3a9f77d4470edef43ab83a865848" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.394760 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99" (UID: "2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.416794 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56798b757f-7zmcj" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.416783 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56798b757f-7zmcj" event={"ID":"2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99","Type":"ContainerDied","Data":"fb4eda99c574d13cf23bb6d080e1b5323d2f357e0502c0f5d3cffdce14fda2ea"} Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.416962 4967 scope.go:117] "RemoveContainer" containerID="3bfc8fac75006f89d9cb64fef1803839d9d486d0b5248f9b30304a8f973d2c63" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.434037 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-r85k5" event={"ID":"dd1dc42f-c657-4dd3-9ca3-e8bc865d6280","Type":"ContainerStarted","Data":"3ab8ec91182125356d6f72f13f5cd55bcab37827d2242e80ed41a42216834c91"} Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.443219 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.443250 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.443263 4967 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.443274 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.468111 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-r85k5" podStartSLOduration=4.836993419 podStartE2EDuration="1m4.468087196s" podCreationTimestamp="2025-11-21 15:55:25 +0000 UTC" firstStartedPulling="2025-11-21 15:55:29.159201653 +0000 UTC m=+1217.417722671" lastFinishedPulling="2025-11-21 15:56:28.79029544 +0000 UTC m=+1277.048816448" observedRunningTime="2025-11-21 15:56:29.451130286 +0000 UTC m=+1277.709651294" watchObservedRunningTime="2025-11-21 15:56:29.468087196 +0000 UTC m=+1277.726608204" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.488623 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56798b757f-7zmcj"] Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.499412 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56798b757f-7zmcj"] Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.503109 4967 scope.go:117] "RemoveContainer" containerID="00cbdcae1c898b75b060b409434501f57a759647179f1d69842992ee69aa8331" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.824642 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-6d9cf75cd4-wgblt"] Nov 21 15:56:29 crc kubenswrapper[4967]: E1121 15:56:29.825369 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d0b548b-65d2-496b-a8f0-5556b5e9760e" 
containerName="placement-db-sync" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.825382 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d0b548b-65d2-496b-a8f0-5556b5e9760e" containerName="placement-db-sync" Nov 21 15:56:29 crc kubenswrapper[4967]: E1121 15:56:29.825395 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99" containerName="dnsmasq-dns" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.825401 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99" containerName="dnsmasq-dns" Nov 21 15:56:29 crc kubenswrapper[4967]: E1121 15:56:29.825428 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99" containerName="init" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.825434 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99" containerName="init" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.826235 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99" containerName="dnsmasq-dns" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.826248 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d0b548b-65d2-496b-a8f0-5556b5e9760e" containerName="placement-db-sync" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.827469 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.831553 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.833387 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.844385 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.844605 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-8f7vq" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.844679 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.874802 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6d9cf75cd4-wgblt"] Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.958618 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/47bf4cdc-4292-43c3-b9c7-2bb28905204b-public-tls-certs\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.958772 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/47bf4cdc-4292-43c3-b9c7-2bb28905204b-internal-tls-certs\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.958932 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47bf4cdc-4292-43c3-b9c7-2bb28905204b-logs\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.959079 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47bf4cdc-4292-43c3-b9c7-2bb28905204b-combined-ca-bundle\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.959119 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5h8p\" (UniqueName: \"kubernetes.io/projected/47bf4cdc-4292-43c3-b9c7-2bb28905204b-kube-api-access-v5h8p\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.959154 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47bf4cdc-4292-43c3-b9c7-2bb28905204b-config-data\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:29 crc kubenswrapper[4967]: I1121 15:56:29.959211 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/47bf4cdc-4292-43c3-b9c7-2bb28905204b-scripts\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.061271 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47bf4cdc-4292-43c3-b9c7-2bb28905204b-config-data\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.061336 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/47bf4cdc-4292-43c3-b9c7-2bb28905204b-scripts\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.061408 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/47bf4cdc-4292-43c3-b9c7-2bb28905204b-public-tls-certs\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.061471 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/47bf4cdc-4292-43c3-b9c7-2bb28905204b-internal-tls-certs\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.061576 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/47bf4cdc-4292-43c3-b9c7-2bb28905204b-logs\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.061668 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47bf4cdc-4292-43c3-b9c7-2bb28905204b-combined-ca-bundle\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.061700 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5h8p\" (UniqueName: \"kubernetes.io/projected/47bf4cdc-4292-43c3-b9c7-2bb28905204b-kube-api-access-v5h8p\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.062465 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47bf4cdc-4292-43c3-b9c7-2bb28905204b-logs\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.067529 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/47bf4cdc-4292-43c3-b9c7-2bb28905204b-scripts\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.068174 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47bf4cdc-4292-43c3-b9c7-2bb28905204b-config-data\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.073915 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/47bf4cdc-4292-43c3-b9c7-2bb28905204b-public-tls-certs\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.081985 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5h8p\" (UniqueName: \"kubernetes.io/projected/47bf4cdc-4292-43c3-b9c7-2bb28905204b-kube-api-access-v5h8p\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.082302 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47bf4cdc-4292-43c3-b9c7-2bb28905204b-combined-ca-bundle\") pod \"placement-6d9cf75cd4-wgblt\" (UID: \"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.084021 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/47bf4cdc-4292-43c3-b9c7-2bb28905204b-internal-tls-certs\") pod \"placement-6d9cf75cd4-wgblt\" (UID: 
\"47bf4cdc-4292-43c3-b9c7-2bb28905204b\") " pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.186842 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.446488 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-96plk" event={"ID":"e0fc2724-5c56-4db8-9a1e-4662761791c3","Type":"ContainerStarted","Data":"dc70f55b12b5d706d65e3c4210fa1892ead921c65eba34d129c8494f817d418e"} Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.448385 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7c9cd95b4b-sqmvx" event={"ID":"cfda2b1a-4625-4150-9b58-c958f677ceb6","Type":"ContainerStarted","Data":"cd0a9dbf7712f15f2c63f5e178c39a6c5371afe7b214af9bcc12f5e171282653"} Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.448529 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.449760 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-pjq5x" event={"ID":"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad","Type":"ContainerStarted","Data":"699e112236b049b9b6e92fd13712daa433d3c0930373e844ba272abeb4c508d3"} Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.469855 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-96plk" podStartSLOduration=4.327082495 podStartE2EDuration="1m5.46983489s" podCreationTimestamp="2025-11-21 15:55:25 +0000 UTC" firstStartedPulling="2025-11-21 15:55:27.644497077 +0000 UTC m=+1215.903018085" lastFinishedPulling="2025-11-21 15:56:28.787249472 +0000 UTC m=+1277.045770480" observedRunningTime="2025-11-21 15:56:30.469067347 +0000 UTC m=+1278.727588375" watchObservedRunningTime="2025-11-21 15:56:30.46983489 +0000 UTC m=+1278.728355918" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.516115 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-pjq5x" podStartSLOduration=4.367026061 podStartE2EDuration="1m5.516094416s" podCreationTimestamp="2025-11-21 15:55:25 +0000 UTC" firstStartedPulling="2025-11-21 15:55:27.638273119 +0000 UTC m=+1215.896794127" lastFinishedPulling="2025-11-21 15:56:28.787341474 +0000 UTC m=+1277.045862482" observedRunningTime="2025-11-21 15:56:30.492051671 +0000 UTC m=+1278.750572679" watchObservedRunningTime="2025-11-21 15:56:30.516094416 +0000 UTC m=+1278.774615424" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.518437 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-7c9cd95b4b-sqmvx" podStartSLOduration=5.518429693 podStartE2EDuration="5.518429693s" podCreationTimestamp="2025-11-21 15:56:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:56:30.509110504 +0000 UTC m=+1278.767631502" watchObservedRunningTime="2025-11-21 15:56:30.518429693 +0000 UTC m=+1278.776950781" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.554601 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99" path="/var/lib/kubelet/pods/2ed7540a-3dcf-4bf8-b6b4-13e4f4618a99/volumes" Nov 21 15:56:30 crc kubenswrapper[4967]: I1121 15:56:30.754963 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/placement-6d9cf75cd4-wgblt"] Nov 21 15:56:31 crc kubenswrapper[4967]: I1121 15:56:31.467323 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6d9cf75cd4-wgblt" event={"ID":"47bf4cdc-4292-43c3-b9c7-2bb28905204b","Type":"ContainerStarted","Data":"0c6f33e42291972ba34656f4cdd8cdb11d3b8f37592e4b920444aa6b3bd43eb5"} Nov 21 15:56:32 crc kubenswrapper[4967]: I1121 15:56:32.476552 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6d9cf75cd4-wgblt" event={"ID":"47bf4cdc-4292-43c3-b9c7-2bb28905204b","Type":"ContainerStarted","Data":"083bc1a73ff6cb2a7809c690eb005a75c81f23529e92a4ed9c24f9405e937aa5"} Nov 21 15:56:32 crc kubenswrapper[4967]: I1121 15:56:32.476859 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6d9cf75cd4-wgblt" event={"ID":"47bf4cdc-4292-43c3-b9c7-2bb28905204b","Type":"ContainerStarted","Data":"26cf654fda949a46e6ca8c0d838345f591e53ead06b68118ca1659bf2884c785"} Nov 21 15:56:32 crc kubenswrapper[4967]: I1121 15:56:32.477305 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:32 crc kubenswrapper[4967]: I1121 15:56:32.477479 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:56:32 crc kubenswrapper[4967]: I1121 15:56:32.501426 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-6d9cf75cd4-wgblt" podStartSLOduration=3.501408106 podStartE2EDuration="3.501408106s" podCreationTimestamp="2025-11-21 15:56:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:56:32.499212473 +0000 UTC m=+1280.757733501" watchObservedRunningTime="2025-11-21 15:56:32.501408106 +0000 UTC m=+1280.759929124" Nov 21 15:56:36 crc kubenswrapper[4967]: I1121 15:56:36.517216 4967 generic.go:334] "Generic (PLEG): container finished" podID="dd1dc42f-c657-4dd3-9ca3-e8bc865d6280" containerID="3ab8ec91182125356d6f72f13f5cd55bcab37827d2242e80ed41a42216834c91" exitCode=0 Nov 21 15:56:36 crc kubenswrapper[4967]: I1121 15:56:36.517297 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-r85k5" event={"ID":"dd1dc42f-c657-4dd3-9ca3-e8bc865d6280","Type":"ContainerDied","Data":"3ab8ec91182125356d6f72f13f5cd55bcab37827d2242e80ed41a42216834c91"} Nov 21 15:56:37 crc kubenswrapper[4967]: I1121 15:56:37.534692 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0b6e974-ebcc-4421-879e-d711bd689855","Type":"ContainerStarted","Data":"d0bcbf5be17949197a012292ccc9c3a46fca27e231adb76db30fe8caa49b4fc3"} Nov 21 15:56:37 crc kubenswrapper[4967]: I1121 15:56:37.534915 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a0b6e974-ebcc-4421-879e-d711bd689855" containerName="ceilometer-central-agent" containerID="cri-o://4a6099baad164d23e4b74d57e9a05aea89e6b80a4d326a9d329049a33a1b0610" gracePeriod=30 Nov 21 15:56:37 crc kubenswrapper[4967]: I1121 15:56:37.534934 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a0b6e974-ebcc-4421-879e-d711bd689855" containerName="proxy-httpd" containerID="cri-o://d0bcbf5be17949197a012292ccc9c3a46fca27e231adb76db30fe8caa49b4fc3" gracePeriod=30 Nov 21 15:56:37 crc kubenswrapper[4967]: I1121 15:56:37.534958 4967 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a0b6e974-ebcc-4421-879e-d711bd689855" containerName="ceilometer-notification-agent" containerID="cri-o://78177d37dda4a777255af8266702bdde12386a0ca9f3803603cfcaf6a78fe6e1" gracePeriod=30 Nov 21 15:56:37 crc kubenswrapper[4967]: I1121 15:56:37.534949 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a0b6e974-ebcc-4421-879e-d711bd689855" containerName="sg-core" containerID="cri-o://85ae66797cfcf9e867815a993b11fc9b91d45bafecfafd6336cc2fbe8be173f8" gracePeriod=30 Nov 21 15:56:37 crc kubenswrapper[4967]: I1121 15:56:37.571094 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=5.480576037 podStartE2EDuration="1m12.571075382s" podCreationTimestamp="2025-11-21 15:55:25 +0000 UTC" firstStartedPulling="2025-11-21 15:55:29.479932966 +0000 UTC m=+1217.738453984" lastFinishedPulling="2025-11-21 15:56:36.570432321 +0000 UTC m=+1284.828953329" observedRunningTime="2025-11-21 15:56:37.56337673 +0000 UTC m=+1285.821897738" watchObservedRunningTime="2025-11-21 15:56:37.571075382 +0000 UTC m=+1285.829596390" Nov 21 15:56:37 crc kubenswrapper[4967]: E1121 15:56:37.867532 4967 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda0b6e974_ebcc_4421_879e_d711bd689855.slice/crio-conmon-d0bcbf5be17949197a012292ccc9c3a46fca27e231adb76db30fe8caa49b4fc3.scope\": RecentStats: unable to find data in memory cache]" Nov 21 15:56:37 crc kubenswrapper[4967]: I1121 15:56:37.920984 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-r85k5" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.043494 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/dd1dc42f-c657-4dd3-9ca3-e8bc865d6280-db-sync-config-data\") pod \"dd1dc42f-c657-4dd3-9ca3-e8bc865d6280\" (UID: \"dd1dc42f-c657-4dd3-9ca3-e8bc865d6280\") " Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.043588 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd1dc42f-c657-4dd3-9ca3-e8bc865d6280-combined-ca-bundle\") pod \"dd1dc42f-c657-4dd3-9ca3-e8bc865d6280\" (UID: \"dd1dc42f-c657-4dd3-9ca3-e8bc865d6280\") " Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.043912 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mlpds\" (UniqueName: \"kubernetes.io/projected/dd1dc42f-c657-4dd3-9ca3-e8bc865d6280-kube-api-access-mlpds\") pod \"dd1dc42f-c657-4dd3-9ca3-e8bc865d6280\" (UID: \"dd1dc42f-c657-4dd3-9ca3-e8bc865d6280\") " Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.049511 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd1dc42f-c657-4dd3-9ca3-e8bc865d6280-kube-api-access-mlpds" (OuterVolumeSpecName: "kube-api-access-mlpds") pod "dd1dc42f-c657-4dd3-9ca3-e8bc865d6280" (UID: "dd1dc42f-c657-4dd3-9ca3-e8bc865d6280"). InnerVolumeSpecName "kube-api-access-mlpds". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.049506 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd1dc42f-c657-4dd3-9ca3-e8bc865d6280-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "dd1dc42f-c657-4dd3-9ca3-e8bc865d6280" (UID: "dd1dc42f-c657-4dd3-9ca3-e8bc865d6280"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.072848 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd1dc42f-c657-4dd3-9ca3-e8bc865d6280-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dd1dc42f-c657-4dd3-9ca3-e8bc865d6280" (UID: "dd1dc42f-c657-4dd3-9ca3-e8bc865d6280"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.146681 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mlpds\" (UniqueName: \"kubernetes.io/projected/dd1dc42f-c657-4dd3-9ca3-e8bc865d6280-kube-api-access-mlpds\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.146718 4967 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/dd1dc42f-c657-4dd3-9ca3-e8bc865d6280-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.146728 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd1dc42f-c657-4dd3-9ca3-e8bc865d6280-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.553168 4967 generic.go:334] "Generic (PLEG): container finished" podID="a0b6e974-ebcc-4421-879e-d711bd689855" containerID="d0bcbf5be17949197a012292ccc9c3a46fca27e231adb76db30fe8caa49b4fc3" exitCode=0 Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.553209 4967 generic.go:334] "Generic (PLEG): container finished" podID="a0b6e974-ebcc-4421-879e-d711bd689855" containerID="85ae66797cfcf9e867815a993b11fc9b91d45bafecfafd6336cc2fbe8be173f8" exitCode=2 Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.553224 4967 generic.go:334] "Generic (PLEG): container finished" podID="a0b6e974-ebcc-4421-879e-d711bd689855" containerID="4a6099baad164d23e4b74d57e9a05aea89e6b80a4d326a9d329049a33a1b0610" exitCode=0 Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.555473 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-r85k5" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.557551 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0b6e974-ebcc-4421-879e-d711bd689855","Type":"ContainerDied","Data":"d0bcbf5be17949197a012292ccc9c3a46fca27e231adb76db30fe8caa49b4fc3"} Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.557605 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0b6e974-ebcc-4421-879e-d711bd689855","Type":"ContainerDied","Data":"85ae66797cfcf9e867815a993b11fc9b91d45bafecfafd6336cc2fbe8be173f8"} Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.557625 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0b6e974-ebcc-4421-879e-d711bd689855","Type":"ContainerDied","Data":"4a6099baad164d23e4b74d57e9a05aea89e6b80a4d326a9d329049a33a1b0610"} Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.557641 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-r85k5" event={"ID":"dd1dc42f-c657-4dd3-9ca3-e8bc865d6280","Type":"ContainerDied","Data":"957a4d9e3cf597ef5af2e2be4b081fc187f4798058301fba046bf533c2bc369b"} Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.557659 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="957a4d9e3cf597ef5af2e2be4b081fc187f4798058301fba046bf533c2bc369b" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.707794 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-bb45fb999-wtdcm"] Nov 21 15:56:38 crc kubenswrapper[4967]: E1121 15:56:38.708532 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd1dc42f-c657-4dd3-9ca3-e8bc865d6280" containerName="barbican-db-sync" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.708547 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd1dc42f-c657-4dd3-9ca3-e8bc865d6280" containerName="barbican-db-sync" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.708762 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd1dc42f-c657-4dd3-9ca3-e8bc865d6280" containerName="barbican-db-sync" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.709851 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-bb45fb999-wtdcm" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.716541 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-g4dqx" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.717002 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.717248 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.722593 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-8784d986b-zqb7r"] Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.724862 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.727899 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.735627 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-bb45fb999-wtdcm"] Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.795453 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-8784d986b-zqb7r"] Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.830675 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-t49fk"] Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.837755 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.849976 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-t49fk"] Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.866705 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ffbf986-245c-45b4-b6e1-544c887362be-config-data\") pod \"barbican-keystone-listener-8784d986b-zqb7r\" (UID: \"4ffbf986-245c-45b4-b6e1-544c887362be\") " pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.866799 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/166a1693-08af-47c6-a9b7-283fb1edfc10-combined-ca-bundle\") pod \"barbican-worker-bb45fb999-wtdcm\" (UID: \"166a1693-08af-47c6-a9b7-283fb1edfc10\") " pod="openstack/barbican-worker-bb45fb999-wtdcm" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.866838 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ffbf986-245c-45b4-b6e1-544c887362be-logs\") pod \"barbican-keystone-listener-8784d986b-zqb7r\" (UID: \"4ffbf986-245c-45b4-b6e1-544c887362be\") " pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.866956 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/166a1693-08af-47c6-a9b7-283fb1edfc10-config-data\") pod \"barbican-worker-bb45fb999-wtdcm\" (UID: \"166a1693-08af-47c6-a9b7-283fb1edfc10\") " pod="openstack/barbican-worker-bb45fb999-wtdcm" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.867021 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mp8lz\" (UniqueName: \"kubernetes.io/projected/4ffbf986-245c-45b4-b6e1-544c887362be-kube-api-access-mp8lz\") pod \"barbican-keystone-listener-8784d986b-zqb7r\" (UID: \"4ffbf986-245c-45b4-b6e1-544c887362be\") " pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.867086 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/166a1693-08af-47c6-a9b7-283fb1edfc10-logs\") pod \"barbican-worker-bb45fb999-wtdcm\" (UID: 
\"166a1693-08af-47c6-a9b7-283fb1edfc10\") " pod="openstack/barbican-worker-bb45fb999-wtdcm" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.867214 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/166a1693-08af-47c6-a9b7-283fb1edfc10-config-data-custom\") pod \"barbican-worker-bb45fb999-wtdcm\" (UID: \"166a1693-08af-47c6-a9b7-283fb1edfc10\") " pod="openstack/barbican-worker-bb45fb999-wtdcm" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.867380 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4ffbf986-245c-45b4-b6e1-544c887362be-config-data-custom\") pod \"barbican-keystone-listener-8784d986b-zqb7r\" (UID: \"4ffbf986-245c-45b4-b6e1-544c887362be\") " pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.867440 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ffbf986-245c-45b4-b6e1-544c887362be-combined-ca-bundle\") pod \"barbican-keystone-listener-8784d986b-zqb7r\" (UID: \"4ffbf986-245c-45b4-b6e1-544c887362be\") " pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.867473 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nc9ck\" (UniqueName: \"kubernetes.io/projected/166a1693-08af-47c6-a9b7-283fb1edfc10-kube-api-access-nc9ck\") pod \"barbican-worker-bb45fb999-wtdcm\" (UID: \"166a1693-08af-47c6-a9b7-283fb1edfc10\") " pod="openstack/barbican-worker-bb45fb999-wtdcm" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.927757 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-68456f646b-s7b7f"] Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.932684 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.935440 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.966621 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-68456f646b-s7b7f"] Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.970839 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ffbf986-245c-45b4-b6e1-544c887362be-combined-ca-bundle\") pod \"barbican-keystone-listener-8784d986b-zqb7r\" (UID: \"4ffbf986-245c-45b4-b6e1-544c887362be\") " pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.970876 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nc9ck\" (UniqueName: \"kubernetes.io/projected/166a1693-08af-47c6-a9b7-283fb1edfc10-kube-api-access-nc9ck\") pod \"barbican-worker-bb45fb999-wtdcm\" (UID: \"166a1693-08af-47c6-a9b7-283fb1edfc10\") " pod="openstack/barbican-worker-bb45fb999-wtdcm" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.970921 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-ovsdbserver-sb\") pod \"dnsmasq-dns-7c67bffd47-t49fk\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.970948 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ffbf986-245c-45b4-b6e1-544c887362be-config-data\") pod \"barbican-keystone-listener-8784d986b-zqb7r\" (UID: \"4ffbf986-245c-45b4-b6e1-544c887362be\") " pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.970985 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/166a1693-08af-47c6-a9b7-283fb1edfc10-combined-ca-bundle\") pod \"barbican-worker-bb45fb999-wtdcm\" (UID: \"166a1693-08af-47c6-a9b7-283fb1edfc10\") " pod="openstack/barbican-worker-bb45fb999-wtdcm" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.971000 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-dns-swift-storage-0\") pod \"dnsmasq-dns-7c67bffd47-t49fk\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.971018 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ffbf986-245c-45b4-b6e1-544c887362be-logs\") pod \"barbican-keystone-listener-8784d986b-zqb7r\" (UID: \"4ffbf986-245c-45b4-b6e1-544c887362be\") " pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.971038 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-config\") pod 
\"dnsmasq-dns-7c67bffd47-t49fk\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.971068 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-dns-svc\") pod \"dnsmasq-dns-7c67bffd47-t49fk\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.971112 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/166a1693-08af-47c6-a9b7-283fb1edfc10-config-data\") pod \"barbican-worker-bb45fb999-wtdcm\" (UID: \"166a1693-08af-47c6-a9b7-283fb1edfc10\") " pod="openstack/barbican-worker-bb45fb999-wtdcm" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.971131 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-ovsdbserver-nb\") pod \"dnsmasq-dns-7c67bffd47-t49fk\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.971160 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mp8lz\" (UniqueName: \"kubernetes.io/projected/4ffbf986-245c-45b4-b6e1-544c887362be-kube-api-access-mp8lz\") pod \"barbican-keystone-listener-8784d986b-zqb7r\" (UID: \"4ffbf986-245c-45b4-b6e1-544c887362be\") " pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.971176 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sg76s\" (UniqueName: \"kubernetes.io/projected/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-kube-api-access-sg76s\") pod \"dnsmasq-dns-7c67bffd47-t49fk\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.971200 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/166a1693-08af-47c6-a9b7-283fb1edfc10-logs\") pod \"barbican-worker-bb45fb999-wtdcm\" (UID: \"166a1693-08af-47c6-a9b7-283fb1edfc10\") " pod="openstack/barbican-worker-bb45fb999-wtdcm" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.971254 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/166a1693-08af-47c6-a9b7-283fb1edfc10-config-data-custom\") pod \"barbican-worker-bb45fb999-wtdcm\" (UID: \"166a1693-08af-47c6-a9b7-283fb1edfc10\") " pod="openstack/barbican-worker-bb45fb999-wtdcm" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.971275 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4ffbf986-245c-45b4-b6e1-544c887362be-config-data-custom\") pod \"barbican-keystone-listener-8784d986b-zqb7r\" (UID: \"4ffbf986-245c-45b4-b6e1-544c887362be\") " pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.972653 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/166a1693-08af-47c6-a9b7-283fb1edfc10-logs\") pod \"barbican-worker-bb45fb999-wtdcm\" (UID: \"166a1693-08af-47c6-a9b7-283fb1edfc10\") " pod="openstack/barbican-worker-bb45fb999-wtdcm" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.976565 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4ffbf986-245c-45b4-b6e1-544c887362be-config-data-custom\") pod \"barbican-keystone-listener-8784d986b-zqb7r\" (UID: \"4ffbf986-245c-45b4-b6e1-544c887362be\") " pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.977725 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ffbf986-245c-45b4-b6e1-544c887362be-config-data\") pod \"barbican-keystone-listener-8784d986b-zqb7r\" (UID: \"4ffbf986-245c-45b4-b6e1-544c887362be\") " pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.978219 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ffbf986-245c-45b4-b6e1-544c887362be-logs\") pod \"barbican-keystone-listener-8784d986b-zqb7r\" (UID: \"4ffbf986-245c-45b4-b6e1-544c887362be\") " pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.979426 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/166a1693-08af-47c6-a9b7-283fb1edfc10-config-data-custom\") pod \"barbican-worker-bb45fb999-wtdcm\" (UID: \"166a1693-08af-47c6-a9b7-283fb1edfc10\") " pod="openstack/barbican-worker-bb45fb999-wtdcm" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.979752 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/166a1693-08af-47c6-a9b7-283fb1edfc10-combined-ca-bundle\") pod \"barbican-worker-bb45fb999-wtdcm\" (UID: \"166a1693-08af-47c6-a9b7-283fb1edfc10\") " pod="openstack/barbican-worker-bb45fb999-wtdcm" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.983431 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ffbf986-245c-45b4-b6e1-544c887362be-combined-ca-bundle\") pod \"barbican-keystone-listener-8784d986b-zqb7r\" (UID: \"4ffbf986-245c-45b4-b6e1-544c887362be\") " pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.984245 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/166a1693-08af-47c6-a9b7-283fb1edfc10-config-data\") pod \"barbican-worker-bb45fb999-wtdcm\" (UID: \"166a1693-08af-47c6-a9b7-283fb1edfc10\") " pod="openstack/barbican-worker-bb45fb999-wtdcm" Nov 21 15:56:38 crc kubenswrapper[4967]: I1121 15:56:38.994191 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mp8lz\" (UniqueName: \"kubernetes.io/projected/4ffbf986-245c-45b4-b6e1-544c887362be-kube-api-access-mp8lz\") pod \"barbican-keystone-listener-8784d986b-zqb7r\" (UID: \"4ffbf986-245c-45b4-b6e1-544c887362be\") " pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.001180 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-nc9ck\" (UniqueName: \"kubernetes.io/projected/166a1693-08af-47c6-a9b7-283fb1edfc10-kube-api-access-nc9ck\") pod \"barbican-worker-bb45fb999-wtdcm\" (UID: \"166a1693-08af-47c6-a9b7-283fb1edfc10\") " pod="openstack/barbican-worker-bb45fb999-wtdcm" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.062428 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-bb45fb999-wtdcm" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.072769 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.073699 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-dns-svc\") pod \"dnsmasq-dns-7c67bffd47-t49fk\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.074489 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-dns-svc\") pod \"dnsmasq-dns-7c67bffd47-t49fk\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.075210 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-ovsdbserver-nb\") pod \"dnsmasq-dns-7c67bffd47-t49fk\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.075240 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-ovsdbserver-nb\") pod \"dnsmasq-dns-7c67bffd47-t49fk\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.075295 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sg76s\" (UniqueName: \"kubernetes.io/projected/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-kube-api-access-sg76s\") pod \"dnsmasq-dns-7c67bffd47-t49fk\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.075683 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-logs\") pod \"barbican-api-68456f646b-s7b7f\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.075768 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-config-data\") pod \"barbican-api-68456f646b-s7b7f\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.075848 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdm42\" 
(UniqueName: \"kubernetes.io/projected/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-kube-api-access-qdm42\") pod \"barbican-api-68456f646b-s7b7f\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.075901 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-config-data-custom\") pod \"barbican-api-68456f646b-s7b7f\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.075960 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-ovsdbserver-sb\") pod \"dnsmasq-dns-7c67bffd47-t49fk\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.076047 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-combined-ca-bundle\") pod \"barbican-api-68456f646b-s7b7f\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.076081 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-dns-swift-storage-0\") pod \"dnsmasq-dns-7c67bffd47-t49fk\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.076119 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-config\") pod \"dnsmasq-dns-7c67bffd47-t49fk\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.076854 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-config\") pod \"dnsmasq-dns-7c67bffd47-t49fk\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.077549 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-ovsdbserver-sb\") pod \"dnsmasq-dns-7c67bffd47-t49fk\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.078185 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-dns-swift-storage-0\") pod \"dnsmasq-dns-7c67bffd47-t49fk\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.105667 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sg76s\" (UniqueName: 
\"kubernetes.io/projected/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-kube-api-access-sg76s\") pod \"dnsmasq-dns-7c67bffd47-t49fk\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.160685 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.178904 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-logs\") pod \"barbican-api-68456f646b-s7b7f\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.178952 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-config-data\") pod \"barbican-api-68456f646b-s7b7f\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.179004 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdm42\" (UniqueName: \"kubernetes.io/projected/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-kube-api-access-qdm42\") pod \"barbican-api-68456f646b-s7b7f\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.179036 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-config-data-custom\") pod \"barbican-api-68456f646b-s7b7f\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.179123 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-combined-ca-bundle\") pod \"barbican-api-68456f646b-s7b7f\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.179628 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-logs\") pod \"barbican-api-68456f646b-s7b7f\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.184907 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-combined-ca-bundle\") pod \"barbican-api-68456f646b-s7b7f\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.184983 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-config-data\") pod \"barbican-api-68456f646b-s7b7f\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.185183 4967 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-config-data-custom\") pod \"barbican-api-68456f646b-s7b7f\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.201810 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdm42\" (UniqueName: \"kubernetes.io/projected/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-kube-api-access-qdm42\") pod \"barbican-api-68456f646b-s7b7f\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.497101 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.581514 4967 generic.go:334] "Generic (PLEG): container finished" podID="e0fc2724-5c56-4db8-9a1e-4662761791c3" containerID="dc70f55b12b5d706d65e3c4210fa1892ead921c65eba34d129c8494f817d418e" exitCode=0 Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.581559 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-96plk" event={"ID":"e0fc2724-5c56-4db8-9a1e-4662761791c3","Type":"ContainerDied","Data":"dc70f55b12b5d706d65e3c4210fa1892ead921c65eba34d129c8494f817d418e"} Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.697838 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-8784d986b-zqb7r"] Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.714009 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-bb45fb999-wtdcm"] Nov 21 15:56:39 crc kubenswrapper[4967]: W1121 15:56:39.720412 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod166a1693_08af_47c6_a9b7_283fb1edfc10.slice/crio-a6b6283d312fdc0dbf09bfaf25216a23e49dd57a1c7911f7268e3c6b3e051f98 WatchSource:0}: Error finding container a6b6283d312fdc0dbf09bfaf25216a23e49dd57a1c7911f7268e3c6b3e051f98: Status 404 returned error can't find the container with id a6b6283d312fdc0dbf09bfaf25216a23e49dd57a1c7911f7268e3c6b3e051f98 Nov 21 15:56:39 crc kubenswrapper[4967]: I1121 15:56:39.849109 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-t49fk"] Nov 21 15:56:39 crc kubenswrapper[4967]: W1121 15:56:39.864256 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbae95bc3_2fe5_4a3e_86d6_eba75a6220bc.slice/crio-55e30b7d3a3e9522f1f5e3e87ccb387732460f31c98ddc169152928e030208b8 WatchSource:0}: Error finding container 55e30b7d3a3e9522f1f5e3e87ccb387732460f31c98ddc169152928e030208b8: Status 404 returned error can't find the container with id 55e30b7d3a3e9522f1f5e3e87ccb387732460f31c98ddc169152928e030208b8 Nov 21 15:56:40 crc kubenswrapper[4967]: W1121 15:56:40.028907 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda0d4a6bc_2860_495d_8fa4_76668ddcbcec.slice/crio-30247a3fc3eac644cbef26d1b2e0b63ba0ba1d4711dddf8396295bb943c7a674 WatchSource:0}: Error finding container 30247a3fc3eac644cbef26d1b2e0b63ba0ba1d4711dddf8396295bb943c7a674: Status 404 returned error can't find the container with id 
30247a3fc3eac644cbef26d1b2e0b63ba0ba1d4711dddf8396295bb943c7a674 Nov 21 15:56:40 crc kubenswrapper[4967]: I1121 15:56:40.036200 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-68456f646b-s7b7f"] Nov 21 15:56:40 crc kubenswrapper[4967]: I1121 15:56:40.595631 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-bb45fb999-wtdcm" event={"ID":"166a1693-08af-47c6-a9b7-283fb1edfc10","Type":"ContainerStarted","Data":"a6b6283d312fdc0dbf09bfaf25216a23e49dd57a1c7911f7268e3c6b3e051f98"} Nov 21 15:56:40 crc kubenswrapper[4967]: I1121 15:56:40.596844 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" event={"ID":"4ffbf986-245c-45b4-b6e1-544c887362be","Type":"ContainerStarted","Data":"f7e26b5d4415f468247c71ef7d9dcbcd10204c5ddbcb33b8b5280bf4eeba29c3"} Nov 21 15:56:40 crc kubenswrapper[4967]: I1121 15:56:40.598892 4967 generic.go:334] "Generic (PLEG): container finished" podID="71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad" containerID="699e112236b049b9b6e92fd13712daa433d3c0930373e844ba272abeb4c508d3" exitCode=0 Nov 21 15:56:40 crc kubenswrapper[4967]: I1121 15:56:40.598943 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-pjq5x" event={"ID":"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad","Type":"ContainerDied","Data":"699e112236b049b9b6e92fd13712daa433d3c0930373e844ba272abeb4c508d3"} Nov 21 15:56:40 crc kubenswrapper[4967]: I1121 15:56:40.600732 4967 generic.go:334] "Generic (PLEG): container finished" podID="bae95bc3-2fe5-4a3e-86d6-eba75a6220bc" containerID="c99e39cda58c84136c9ee2399ffe7b86a3c9f66705d7ac9013e3c498414cb656" exitCode=0 Nov 21 15:56:40 crc kubenswrapper[4967]: I1121 15:56:40.600777 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" event={"ID":"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc","Type":"ContainerDied","Data":"c99e39cda58c84136c9ee2399ffe7b86a3c9f66705d7ac9013e3c498414cb656"} Nov 21 15:56:40 crc kubenswrapper[4967]: I1121 15:56:40.600793 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" event={"ID":"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc","Type":"ContainerStarted","Data":"55e30b7d3a3e9522f1f5e3e87ccb387732460f31c98ddc169152928e030208b8"} Nov 21 15:56:40 crc kubenswrapper[4967]: I1121 15:56:40.616843 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68456f646b-s7b7f" event={"ID":"a0d4a6bc-2860-495d-8fa4-76668ddcbcec","Type":"ContainerStarted","Data":"b7a1cad6be9818a65e19e387824c52a515e65e96296ea132b987ac65a6778cd9"} Nov 21 15:56:40 crc kubenswrapper[4967]: I1121 15:56:40.617179 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68456f646b-s7b7f" event={"ID":"a0d4a6bc-2860-495d-8fa4-76668ddcbcec","Type":"ContainerStarted","Data":"e4209bb0fb9ffacd5e770f3fe263b00db4c9df6775c3bc6150c23ab4302d1f08"} Nov 21 15:56:40 crc kubenswrapper[4967]: I1121 15:56:40.617196 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68456f646b-s7b7f" event={"ID":"a0d4a6bc-2860-495d-8fa4-76668ddcbcec","Type":"ContainerStarted","Data":"30247a3fc3eac644cbef26d1b2e0b63ba0ba1d4711dddf8396295bb943c7a674"} Nov 21 15:56:40 crc kubenswrapper[4967]: I1121 15:56:40.617520 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:40 crc kubenswrapper[4967]: I1121 15:56:40.617556 4967 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:40 crc kubenswrapper[4967]: I1121 15:56:40.680833 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-68456f646b-s7b7f" podStartSLOduration=2.68081213 podStartE2EDuration="2.68081213s" podCreationTimestamp="2025-11-21 15:56:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:56:40.657346572 +0000 UTC m=+1288.915867590" watchObservedRunningTime="2025-11-21 15:56:40.68081213 +0000 UTC m=+1288.939333138" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.211448 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-96plk" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.336573 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0fc2724-5c56-4db8-9a1e-4662761791c3-config-data\") pod \"e0fc2724-5c56-4db8-9a1e-4662761791c3\" (UID: \"e0fc2724-5c56-4db8-9a1e-4662761791c3\") " Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.337302 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5gzv\" (UniqueName: \"kubernetes.io/projected/e0fc2724-5c56-4db8-9a1e-4662761791c3-kube-api-access-m5gzv\") pod \"e0fc2724-5c56-4db8-9a1e-4662761791c3\" (UID: \"e0fc2724-5c56-4db8-9a1e-4662761791c3\") " Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.337458 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0fc2724-5c56-4db8-9a1e-4662761791c3-combined-ca-bundle\") pod \"e0fc2724-5c56-4db8-9a1e-4662761791c3\" (UID: \"e0fc2724-5c56-4db8-9a1e-4662761791c3\") " Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.342432 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0fc2724-5c56-4db8-9a1e-4662761791c3-kube-api-access-m5gzv" (OuterVolumeSpecName: "kube-api-access-m5gzv") pod "e0fc2724-5c56-4db8-9a1e-4662761791c3" (UID: "e0fc2724-5c56-4db8-9a1e-4662761791c3"). InnerVolumeSpecName "kube-api-access-m5gzv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.376538 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0fc2724-5c56-4db8-9a1e-4662761791c3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e0fc2724-5c56-4db8-9a1e-4662761791c3" (UID: "e0fc2724-5c56-4db8-9a1e-4662761791c3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.440207 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0fc2724-5c56-4db8-9a1e-4662761791c3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.440534 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m5gzv\" (UniqueName: \"kubernetes.io/projected/e0fc2724-5c56-4db8-9a1e-4662761791c3-kube-api-access-m5gzv\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.445266 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0fc2724-5c56-4db8-9a1e-4662761791c3-config-data" (OuterVolumeSpecName: "config-data") pod "e0fc2724-5c56-4db8-9a1e-4662761791c3" (UID: "e0fc2724-5c56-4db8-9a1e-4662761791c3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.543019 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0fc2724-5c56-4db8-9a1e-4662761791c3-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.630046 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-96plk" event={"ID":"e0fc2724-5c56-4db8-9a1e-4662761791c3","Type":"ContainerDied","Data":"548cc09f86cdde0944471433c3b63539fcb651eefd250fd9c5c58b56f7efdcd5"} Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.630094 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="548cc09f86cdde0944471433c3b63539fcb651eefd250fd9c5c58b56f7efdcd5" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.630113 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-96plk" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.703477 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5969866c74-lgff2"] Nov 21 15:56:41 crc kubenswrapper[4967]: E1121 15:56:41.703918 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0fc2724-5c56-4db8-9a1e-4662761791c3" containerName="heat-db-sync" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.703937 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0fc2724-5c56-4db8-9a1e-4662761791c3" containerName="heat-db-sync" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.706661 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0fc2724-5c56-4db8-9a1e-4662761791c3" containerName="heat-db-sync" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.707878 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.714200 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.714465 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.756554 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5969866c74-lgff2"] Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.767446 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/16a9a790-55c8-4924-ae4d-c788238f8211-public-tls-certs\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.767512 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/16a9a790-55c8-4924-ae4d-c788238f8211-logs\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.767688 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16a9a790-55c8-4924-ae4d-c788238f8211-config-data\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.767726 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xklhn\" (UniqueName: \"kubernetes.io/projected/16a9a790-55c8-4924-ae4d-c788238f8211-kube-api-access-xklhn\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.767762 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16a9a790-55c8-4924-ae4d-c788238f8211-combined-ca-bundle\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.767815 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/16a9a790-55c8-4924-ae4d-c788238f8211-config-data-custom\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.767961 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/16a9a790-55c8-4924-ae4d-c788238f8211-internal-tls-certs\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.873800 4967 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/16a9a790-55c8-4924-ae4d-c788238f8211-public-tls-certs\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.874335 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/16a9a790-55c8-4924-ae4d-c788238f8211-logs\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.874975 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16a9a790-55c8-4924-ae4d-c788238f8211-config-data\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.875018 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xklhn\" (UniqueName: \"kubernetes.io/projected/16a9a790-55c8-4924-ae4d-c788238f8211-kube-api-access-xklhn\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.875071 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16a9a790-55c8-4924-ae4d-c788238f8211-combined-ca-bundle\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.875185 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/16a9a790-55c8-4924-ae4d-c788238f8211-config-data-custom\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.875386 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/16a9a790-55c8-4924-ae4d-c788238f8211-internal-tls-certs\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.880141 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/16a9a790-55c8-4924-ae4d-c788238f8211-logs\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.880173 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/16a9a790-55c8-4924-ae4d-c788238f8211-config-data-custom\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.881637 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/16a9a790-55c8-4924-ae4d-c788238f8211-config-data\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.881888 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/16a9a790-55c8-4924-ae4d-c788238f8211-internal-tls-certs\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.882519 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/16a9a790-55c8-4924-ae4d-c788238f8211-public-tls-certs\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.883796 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16a9a790-55c8-4924-ae4d-c788238f8211-combined-ca-bundle\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:41 crc kubenswrapper[4967]: I1121 15:56:41.890941 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xklhn\" (UniqueName: \"kubernetes.io/projected/16a9a790-55c8-4924-ae4d-c788238f8211-kube-api-access-xklhn\") pod \"barbican-api-5969866c74-lgff2\" (UID: \"16a9a790-55c8-4924-ae4d-c788238f8211\") " pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.018262 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.061134 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.079159 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-config-data\") pod \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.079233 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hzxxr\" (UniqueName: \"kubernetes.io/projected/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-kube-api-access-hzxxr\") pod \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.079540 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-scripts\") pod \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.079582 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-combined-ca-bundle\") pod \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.079774 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-db-sync-config-data\") pod \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.079834 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-etc-machine-id\") pod \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\" (UID: \"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad\") " Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.080010 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad" (UID: "71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.080374 4967 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.090825 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-scripts" (OuterVolumeSpecName: "scripts") pod "71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad" (UID: "71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.094431 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-kube-api-access-hzxxr" (OuterVolumeSpecName: "kube-api-access-hzxxr") pod "71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad" (UID: "71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad"). InnerVolumeSpecName "kube-api-access-hzxxr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.096081 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad" (UID: "71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.120671 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad" (UID: "71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.156181 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-config-data" (OuterVolumeSpecName: "config-data") pod "71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad" (UID: "71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.181577 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.181599 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.181610 4967 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.181618 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.181627 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hzxxr\" (UniqueName: \"kubernetes.io/projected/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad-kube-api-access-hzxxr\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.660865 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5969866c74-lgff2"] Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.686265 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" event={"ID":"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc","Type":"ContainerStarted","Data":"569ff6dc54e3b78749c77f978e53e0bd1dda6d502f8381fe4d917ad663d656ce"} Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.686367 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.692336 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-bb45fb999-wtdcm" event={"ID":"166a1693-08af-47c6-a9b7-283fb1edfc10","Type":"ContainerStarted","Data":"bf9414934b42d6ce6490c81480e2adb02878f19d76025c8c23bfdcda916382cc"} Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.692392 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-bb45fb999-wtdcm" event={"ID":"166a1693-08af-47c6-a9b7-283fb1edfc10","Type":"ContainerStarted","Data":"5d59eb270b876c9e6e77ef3e9ecb23a4605366ffad0c798c34232b0b704f1bb5"} Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.695764 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" event={"ID":"4ffbf986-245c-45b4-b6e1-544c887362be","Type":"ContainerStarted","Data":"2109a1b957b6ae34a69d89b7c9dfbdef16aee709c7d07225053523a7bafc7f73"} Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.711021 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-pjq5x" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.711366 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-pjq5x" event={"ID":"71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad","Type":"ContainerDied","Data":"c9719b63508e1ab0ec63e25c022a3ed6b20385114ae64642e88fe44c794d4013"} Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.711407 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9719b63508e1ab0ec63e25c022a3ed6b20385114ae64642e88fe44c794d4013" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.722222 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" podStartSLOduration=4.72220046 podStartE2EDuration="4.72220046s" podCreationTimestamp="2025-11-21 15:56:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:56:42.705338203 +0000 UTC m=+1290.963859211" watchObservedRunningTime="2025-11-21 15:56:42.72220046 +0000 UTC m=+1290.980721468" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.754478 4967 generic.go:334] "Generic (PLEG): container finished" podID="a0b6e974-ebcc-4421-879e-d711bd689855" containerID="78177d37dda4a777255af8266702bdde12386a0ca9f3803603cfcaf6a78fe6e1" exitCode=0 Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.754558 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0b6e974-ebcc-4421-879e-d711bd689855","Type":"ContainerDied","Data":"78177d37dda4a777255af8266702bdde12386a0ca9f3803603cfcaf6a78fe6e1"} Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.766262 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-bb45fb999-wtdcm" podStartSLOduration=2.83628331 podStartE2EDuration="4.766241422s" podCreationTimestamp="2025-11-21 15:56:38 +0000 UTC" firstStartedPulling="2025-11-21 15:56:39.722374797 +0000 UTC m=+1287.980895805" lastFinishedPulling="2025-11-21 15:56:41.652332909 +0000 UTC m=+1289.910853917" observedRunningTime="2025-11-21 15:56:42.731096657 +0000 UTC m=+1290.989617665" watchObservedRunningTime="2025-11-21 15:56:42.766241422 +0000 UTC m=+1291.024762430" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.799814 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" podStartSLOduration=2.852621992 podStartE2EDuration="4.79976544s" podCreationTimestamp="2025-11-21 15:56:38 +0000 UTC" firstStartedPulling="2025-11-21 15:56:39.704156251 +0000 UTC m=+1287.962677259" lastFinishedPulling="2025-11-21 15:56:41.651299699 +0000 UTC m=+1289.909820707" observedRunningTime="2025-11-21 15:56:42.755384508 +0000 UTC m=+1291.013905536" watchObservedRunningTime="2025-11-21 15:56:42.79976544 +0000 UTC m=+1291.058286458" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.928414 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 15:56:42 crc kubenswrapper[4967]: E1121 15:56:42.928987 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad" containerName="cinder-db-sync" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.929013 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad" containerName="cinder-db-sync" Nov 21 15:56:42 crc 
kubenswrapper[4967]: I1121 15:56:42.929382 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad" containerName="cinder-db-sync" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.935068 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.938468 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.940848 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.941047 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.941184 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-wp75j" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.941335 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 21 15:56:42 crc kubenswrapper[4967]: I1121 15:56:42.944064 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.043571 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-t49fk"] Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.083124 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5"] Nov 21 15:56:43 crc kubenswrapper[4967]: E1121 15:56:43.083673 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0b6e974-ebcc-4421-879e-d711bd689855" containerName="proxy-httpd" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.083706 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0b6e974-ebcc-4421-879e-d711bd689855" containerName="proxy-httpd" Nov 21 15:56:43 crc kubenswrapper[4967]: E1121 15:56:43.083760 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0b6e974-ebcc-4421-879e-d711bd689855" containerName="sg-core" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.083770 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0b6e974-ebcc-4421-879e-d711bd689855" containerName="sg-core" Nov 21 15:56:43 crc kubenswrapper[4967]: E1121 15:56:43.083784 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0b6e974-ebcc-4421-879e-d711bd689855" containerName="ceilometer-central-agent" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.083791 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0b6e974-ebcc-4421-879e-d711bd689855" containerName="ceilometer-central-agent" Nov 21 15:56:43 crc kubenswrapper[4967]: E1121 15:56:43.083804 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0b6e974-ebcc-4421-879e-d711bd689855" containerName="ceilometer-notification-agent" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.083809 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0b6e974-ebcc-4421-879e-d711bd689855" containerName="ceilometer-notification-agent" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.083997 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0b6e974-ebcc-4421-879e-d711bd689855" containerName="ceilometer-notification-agent" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 
15:56:43.084026 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0b6e974-ebcc-4421-879e-d711bd689855" containerName="sg-core" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.084038 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0b6e974-ebcc-4421-879e-d711bd689855" containerName="ceilometer-central-agent" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.084050 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0b6e974-ebcc-4421-879e-d711bd689855" containerName="proxy-httpd" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.085426 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.092660 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5"] Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.121198 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-sg-core-conf-yaml\") pod \"a0b6e974-ebcc-4421-879e-d711bd689855\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.121338 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-combined-ca-bundle\") pod \"a0b6e974-ebcc-4421-879e-d711bd689855\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.121445 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qd6c2\" (UniqueName: \"kubernetes.io/projected/a0b6e974-ebcc-4421-879e-d711bd689855-kube-api-access-qd6c2\") pod \"a0b6e974-ebcc-4421-879e-d711bd689855\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.121481 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0b6e974-ebcc-4421-879e-d711bd689855-log-httpd\") pod \"a0b6e974-ebcc-4421-879e-d711bd689855\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.121570 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0b6e974-ebcc-4421-879e-d711bd689855-run-httpd\") pod \"a0b6e974-ebcc-4421-879e-d711bd689855\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.121598 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-config-data\") pod \"a0b6e974-ebcc-4421-879e-d711bd689855\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.121646 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-scripts\") pod \"a0b6e974-ebcc-4421-879e-d711bd689855\" (UID: \"a0b6e974-ebcc-4421-879e-d711bd689855\") " Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.122028 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-scripts\") pod \"cinder-scheduler-0\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.122122 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.122180 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-config-data\") pod \"cinder-scheduler-0\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.122219 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.122284 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.122425 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0b6e974-ebcc-4421-879e-d711bd689855-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a0b6e974-ebcc-4421-879e-d711bd689855" (UID: "a0b6e974-ebcc-4421-879e-d711bd689855"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.122304 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dldwn\" (UniqueName: \"kubernetes.io/projected/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-kube-api-access-dldwn\") pod \"cinder-scheduler-0\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.123340 4967 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0b6e974-ebcc-4421-879e-d711bd689855-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.124341 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0b6e974-ebcc-4421-879e-d711bd689855-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a0b6e974-ebcc-4421-879e-d711bd689855" (UID: "a0b6e974-ebcc-4421-879e-d711bd689855"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.149271 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-scripts" (OuterVolumeSpecName: "scripts") pod "a0b6e974-ebcc-4421-879e-d711bd689855" (UID: "a0b6e974-ebcc-4421-879e-d711bd689855"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.149474 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0b6e974-ebcc-4421-879e-d711bd689855-kube-api-access-qd6c2" (OuterVolumeSpecName: "kube-api-access-qd6c2") pod "a0b6e974-ebcc-4421-879e-d711bd689855" (UID: "a0b6e974-ebcc-4421-879e-d711bd689855"). InnerVolumeSpecName "kube-api-access-qd6c2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.214173 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.216102 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.221642 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.225560 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-config\") pod \"dnsmasq-dns-5cc8b5d5c5-vbvm5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.225673 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-dns-svc\") pod \"dnsmasq-dns-5cc8b5d5c5-vbvm5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.225752 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.225847 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sswjz\" (UniqueName: \"kubernetes.io/projected/5527dd0f-b1df-4a90-846b-43dd6319bfa5-kube-api-access-sswjz\") pod \"dnsmasq-dns-5cc8b5d5c5-vbvm5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.225919 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-ovsdbserver-nb\") pod \"dnsmasq-dns-5cc8b5d5c5-vbvm5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.226020 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-config-data\") pod \"cinder-scheduler-0\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.226113 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.226221 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-ovsdbserver-sb\") pod \"dnsmasq-dns-5cc8b5d5c5-vbvm5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.226402 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.226484 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-dns-swift-storage-0\") pod \"dnsmasq-dns-5cc8b5d5c5-vbvm5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.226559 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dldwn\" (UniqueName: \"kubernetes.io/projected/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-kube-api-access-dldwn\") pod \"cinder-scheduler-0\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.226673 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-scripts\") pod \"cinder-scheduler-0\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.226829 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.227120 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qd6c2\" (UniqueName: \"kubernetes.io/projected/a0b6e974-ebcc-4421-879e-d711bd689855-kube-api-access-qd6c2\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.227230 4967 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a0b6e974-ebcc-4421-879e-d711bd689855-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.227294 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.229498 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.246287 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.259283 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-scripts\") pod \"cinder-scheduler-0\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.260372 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-config-data\") pod \"cinder-scheduler-0\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.264789 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dldwn\" (UniqueName: \"kubernetes.io/projected/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-kube-api-access-dldwn\") pod \"cinder-scheduler-0\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.273074 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.293439 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a0b6e974-ebcc-4421-879e-d711bd689855" (UID: "a0b6e974-ebcc-4421-879e-d711bd689855"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.300690 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.320426 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a0b6e974-ebcc-4421-879e-d711bd689855" (UID: "a0b6e974-ebcc-4421-879e-d711bd689855"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.329149 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-ovsdbserver-sb\") pod \"dnsmasq-dns-5cc8b5d5c5-vbvm5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.329251 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2decb59c-2387-440c-9a19-203ab519d02b-logs\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.329281 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.329383 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-dns-swift-storage-0\") pod \"dnsmasq-dns-5cc8b5d5c5-vbvm5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.329428 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-config-data\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.329507 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-config-data-custom\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.329561 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2decb59c-2387-440c-9a19-203ab519d02b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.329623 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-scripts\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.329670 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bc85t\" (UniqueName: \"kubernetes.io/projected/2decb59c-2387-440c-9a19-203ab519d02b-kube-api-access-bc85t\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.329725 4967 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-config\") pod \"dnsmasq-dns-5cc8b5d5c5-vbvm5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.329752 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-dns-svc\") pod \"dnsmasq-dns-5cc8b5d5c5-vbvm5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.331574 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sswjz\" (UniqueName: \"kubernetes.io/projected/5527dd0f-b1df-4a90-846b-43dd6319bfa5-kube-api-access-sswjz\") pod \"dnsmasq-dns-5cc8b5d5c5-vbvm5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.331638 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-ovsdbserver-nb\") pod \"dnsmasq-dns-5cc8b5d5c5-vbvm5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.331873 4967 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.331895 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.332791 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-ovsdbserver-nb\") pod \"dnsmasq-dns-5cc8b5d5c5-vbvm5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.333603 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-ovsdbserver-sb\") pod \"dnsmasq-dns-5cc8b5d5c5-vbvm5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.333973 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-dns-swift-storage-0\") pod \"dnsmasq-dns-5cc8b5d5c5-vbvm5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.334304 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-config\") pod \"dnsmasq-dns-5cc8b5d5c5-vbvm5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 
15:56:43.335359 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-dns-svc\") pod \"dnsmasq-dns-5cc8b5d5c5-vbvm5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.360103 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sswjz\" (UniqueName: \"kubernetes.io/projected/5527dd0f-b1df-4a90-846b-43dd6319bfa5-kube-api-access-sswjz\") pod \"dnsmasq-dns-5cc8b5d5c5-vbvm5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.368479 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-config-data" (OuterVolumeSpecName: "config-data") pod "a0b6e974-ebcc-4421-879e-d711bd689855" (UID: "a0b6e974-ebcc-4421-879e-d711bd689855"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.433937 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-scripts\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.433991 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bc85t\" (UniqueName: \"kubernetes.io/projected/2decb59c-2387-440c-9a19-203ab519d02b-kube-api-access-bc85t\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.434108 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.434125 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2decb59c-2387-440c-9a19-203ab519d02b-logs\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.434177 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-config-data\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.434253 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-config-data-custom\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.434294 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2decb59c-2387-440c-9a19-203ab519d02b-etc-machine-id\") pod \"cinder-api-0\" (UID: 
\"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.434384 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0b6e974-ebcc-4421-879e-d711bd689855-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.434423 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2decb59c-2387-440c-9a19-203ab519d02b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.435440 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2decb59c-2387-440c-9a19-203ab519d02b-logs\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.439452 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-config-data\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.441347 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-config-data-custom\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.449019 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.451922 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-scripts\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.461825 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bc85t\" (UniqueName: \"kubernetes.io/projected/2decb59c-2387-440c-9a19-203ab519d02b-kube-api-access-bc85t\") pod \"cinder-api-0\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.501676 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.546043 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.767286 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-8784d986b-zqb7r" event={"ID":"4ffbf986-245c-45b4-b6e1-544c887362be","Type":"ContainerStarted","Data":"a189d440914e77ede748080d03d72a4e48dc2acc722260c172cd27952fb1822c"} Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.770610 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5969866c74-lgff2" event={"ID":"16a9a790-55c8-4924-ae4d-c788238f8211","Type":"ContainerStarted","Data":"5c37424c6557cb7e6218033476cab83b6594201989ca9b978020f5bb4af4ef9b"} Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.770649 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5969866c74-lgff2" event={"ID":"16a9a790-55c8-4924-ae4d-c788238f8211","Type":"ContainerStarted","Data":"f47e42eab0e37c80870836ef440007ee79e55fc27b4d8df6aa83c1cf283595b5"} Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.770664 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5969866c74-lgff2" event={"ID":"16a9a790-55c8-4924-ae4d-c788238f8211","Type":"ContainerStarted","Data":"87cabd17e961ef48b533d139e8eba63a73392b605c54488415804c41ee149869"} Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.770914 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.770958 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.776386 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.776516 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a0b6e974-ebcc-4421-879e-d711bd689855","Type":"ContainerDied","Data":"c289be9cd5b37960b44f7038fc7dd73dcd099930d83da135fbf1051b63e7f943"} Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.776584 4967 scope.go:117] "RemoveContainer" containerID="d0bcbf5be17949197a012292ccc9c3a46fca27e231adb76db30fe8caa49b4fc3" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.796500 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5969866c74-lgff2" podStartSLOduration=2.796480418 podStartE2EDuration="2.796480418s" podCreationTimestamp="2025-11-21 15:56:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:56:43.792655948 +0000 UTC m=+1292.051176976" watchObservedRunningTime="2025-11-21 15:56:43.796480418 +0000 UTC m=+1292.055001426" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.843506 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.867410 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.915006 4967 scope.go:117] "RemoveContainer" containerID="85ae66797cfcf9e867815a993b11fc9b91d45bafecfafd6336cc2fbe8be173f8" Nov 21 15:56:43 crc kubenswrapper[4967]: W1121 15:56:43.918056 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83e7fc7b_7040_4fab_a499_fb9bf9c0fa92.slice/crio-9bec053044f4c503ee48c0810bd74cd46b96f99e8c74e2fe61edb558a253ab56 WatchSource:0}: Error finding container 9bec053044f4c503ee48c0810bd74cd46b96f99e8c74e2fe61edb558a253ab56: Status 404 returned error can't find the container with id 9bec053044f4c503ee48c0810bd74cd46b96f99e8c74e2fe61edb558a253ab56 Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.932766 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.963378 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.966895 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.978377 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.978567 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 15:56:43 crc kubenswrapper[4967]: I1121 15:56:43.994278 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.014557 4967 scope.go:117] "RemoveContainer" containerID="78177d37dda4a777255af8266702bdde12386a0ca9f3803603cfcaf6a78fe6e1" Nov 21 15:56:44 crc kubenswrapper[4967]: W1121 15:56:44.025337 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5527dd0f_b1df_4a90_846b_43dd6319bfa5.slice/crio-bf3f46237d00dd8265f163bfb23f3247b48950cc5e227a4d1fbb1580d31aef50 WatchSource:0}: Error finding container bf3f46237d00dd8265f163bfb23f3247b48950cc5e227a4d1fbb1580d31aef50: Status 404 returned error can't find the container with id bf3f46237d00dd8265f163bfb23f3247b48950cc5e227a4d1fbb1580d31aef50 Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.028401 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5"] Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.065492 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e5ec60e1-6164-417c-bd54-dba2488c532a-run-httpd\") pod \"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.065798 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cr9rq\" (UniqueName: \"kubernetes.io/projected/e5ec60e1-6164-417c-bd54-dba2488c532a-kube-api-access-cr9rq\") pod \"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.065848 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.065873 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e5ec60e1-6164-417c-bd54-dba2488c532a-log-httpd\") pod \"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.065914 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-scripts\") pod \"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.065939 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-config-data\") pod 
\"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.066333 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.168407 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-scripts\") pod \"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.168471 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-config-data\") pod \"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.168538 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.168682 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e5ec60e1-6164-417c-bd54-dba2488c532a-run-httpd\") pod \"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.168728 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cr9rq\" (UniqueName: \"kubernetes.io/projected/e5ec60e1-6164-417c-bd54-dba2488c532a-kube-api-access-cr9rq\") pod \"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.168785 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.168829 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e5ec60e1-6164-417c-bd54-dba2488c532a-log-httpd\") pod \"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.169327 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e5ec60e1-6164-417c-bd54-dba2488c532a-run-httpd\") pod \"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.169465 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e5ec60e1-6164-417c-bd54-dba2488c532a-log-httpd\") pod \"ceilometer-0\" (UID: 
\"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.173303 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-config-data\") pod \"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.175630 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.177335 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.181606 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-scripts\") pod \"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.186036 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cr9rq\" (UniqueName: \"kubernetes.io/projected/e5ec60e1-6164-417c-bd54-dba2488c532a-kube-api-access-cr9rq\") pod \"ceilometer-0\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.203636 4967 scope.go:117] "RemoveContainer" containerID="4a6099baad164d23e4b74d57e9a05aea89e6b80a4d326a9d329049a33a1b0610" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.219126 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.315009 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.564842 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0b6e974-ebcc-4421-879e-d711bd689855" path="/var/lib/kubelet/pods/a0b6e974-ebcc-4421-879e-d711bd689855/volumes" Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.794197 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" event={"ID":"5527dd0f-b1df-4a90-846b-43dd6319bfa5","Type":"ContainerStarted","Data":"bf3f46237d00dd8265f163bfb23f3247b48950cc5e227a4d1fbb1580d31aef50"} Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.797993 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2decb59c-2387-440c-9a19-203ab519d02b","Type":"ContainerStarted","Data":"7900a3e49a7ba61ccdabe5f3c82940630c46daeb3a9bb6c6edfafb71294de37c"} Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.801152 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92","Type":"ContainerStarted","Data":"9bec053044f4c503ee48c0810bd74cd46b96f99e8c74e2fe61edb558a253ab56"} Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.801680 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" podUID="bae95bc3-2fe5-4a3e-86d6-eba75a6220bc" containerName="dnsmasq-dns" containerID="cri-o://569ff6dc54e3b78749c77f978e53e0bd1dda6d502f8381fe4d917ad663d656ce" gracePeriod=10 Nov 21 15:56:44 crc kubenswrapper[4967]: I1121 15:56:44.871083 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:56:45 crc kubenswrapper[4967]: I1121 15:56:45.360272 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 21 15:56:45 crc kubenswrapper[4967]: I1121 15:56:45.812868 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e5ec60e1-6164-417c-bd54-dba2488c532a","Type":"ContainerStarted","Data":"fc6060a83782e39b40ef1baa5d556eece025fe0063f51f2f6d9f6b718e77ae97"} Nov 21 15:56:45 crc kubenswrapper[4967]: I1121 15:56:45.826779 4967 generic.go:334] "Generic (PLEG): container finished" podID="bae95bc3-2fe5-4a3e-86d6-eba75a6220bc" containerID="569ff6dc54e3b78749c77f978e53e0bd1dda6d502f8381fe4d917ad663d656ce" exitCode=0 Nov 21 15:56:45 crc kubenswrapper[4967]: I1121 15:56:45.826843 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" event={"ID":"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc","Type":"ContainerDied","Data":"569ff6dc54e3b78749c77f978e53e0bd1dda6d502f8381fe4d917ad663d656ce"} Nov 21 15:56:45 crc kubenswrapper[4967]: I1121 15:56:45.842855 4967 generic.go:334] "Generic (PLEG): container finished" podID="5527dd0f-b1df-4a90-846b-43dd6319bfa5" containerID="6d1010d2ce2e9b54203f3c10fc78cd1e95689563030b05991037db1dcd3cf726" exitCode=0 Nov 21 15:56:45 crc kubenswrapper[4967]: I1121 15:56:45.844017 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" event={"ID":"5527dd0f-b1df-4a90-846b-43dd6319bfa5","Type":"ContainerDied","Data":"6d1010d2ce2e9b54203f3c10fc78cd1e95689563030b05991037db1dcd3cf726"} Nov 21 15:56:45 crc kubenswrapper[4967]: I1121 15:56:45.855276 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"2decb59c-2387-440c-9a19-203ab519d02b","Type":"ContainerStarted","Data":"22415c54ee9f0cac0e911bff0ce2f04336c5b01954fe820822d6430bc5f88cfc"} Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.032534 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.126053 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sg76s\" (UniqueName: \"kubernetes.io/projected/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-kube-api-access-sg76s\") pod \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.126445 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-ovsdbserver-sb\") pod \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.126566 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-dns-svc\") pod \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.126637 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-dns-swift-storage-0\") pod \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.126683 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-ovsdbserver-nb\") pod \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.126710 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-config\") pod \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\" (UID: \"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc\") " Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.135665 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-kube-api-access-sg76s" (OuterVolumeSpecName: "kube-api-access-sg76s") pod "bae95bc3-2fe5-4a3e-86d6-eba75a6220bc" (UID: "bae95bc3-2fe5-4a3e-86d6-eba75a6220bc"). InnerVolumeSpecName "kube-api-access-sg76s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.234609 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sg76s\" (UniqueName: \"kubernetes.io/projected/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-kube-api-access-sg76s\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.238080 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bae95bc3-2fe5-4a3e-86d6-eba75a6220bc" (UID: "bae95bc3-2fe5-4a3e-86d6-eba75a6220bc"). 
InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.243783 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bae95bc3-2fe5-4a3e-86d6-eba75a6220bc" (UID: "bae95bc3-2fe5-4a3e-86d6-eba75a6220bc"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.247510 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "bae95bc3-2fe5-4a3e-86d6-eba75a6220bc" (UID: "bae95bc3-2fe5-4a3e-86d6-eba75a6220bc"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.252621 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bae95bc3-2fe5-4a3e-86d6-eba75a6220bc" (UID: "bae95bc3-2fe5-4a3e-86d6-eba75a6220bc"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.272721 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-config" (OuterVolumeSpecName: "config") pod "bae95bc3-2fe5-4a3e-86d6-eba75a6220bc" (UID: "bae95bc3-2fe5-4a3e-86d6-eba75a6220bc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.340403 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.340436 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.340447 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.340455 4967 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.340464 4967 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.872015 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2decb59c-2387-440c-9a19-203ab519d02b","Type":"ContainerStarted","Data":"3f3566cce4690e025b75c7523004a9d75f591499707d7f17fbdaba1846e0e318"} Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.872718 4967 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openstack/cinder-api-0" podUID="2decb59c-2387-440c-9a19-203ab519d02b" containerName="cinder-api-log" containerID="cri-o://22415c54ee9f0cac0e911bff0ce2f04336c5b01954fe820822d6430bc5f88cfc" gracePeriod=30 Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.872958 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.873217 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="2decb59c-2387-440c-9a19-203ab519d02b" containerName="cinder-api" containerID="cri-o://3f3566cce4690e025b75c7523004a9d75f591499707d7f17fbdaba1846e0e318" gracePeriod=30 Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.877068 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92","Type":"ContainerStarted","Data":"6e7f14a0cd1f82e6eb7173240eba239b9bcd3aea7c98e320171fbe2c72645fe8"} Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.884732 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e5ec60e1-6164-417c-bd54-dba2488c532a","Type":"ContainerStarted","Data":"7be10394e739b35ce1e663f218abef8e4a2ddd5a257751c4cd52cce365cd4b2b"} Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.887148 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" event={"ID":"bae95bc3-2fe5-4a3e-86d6-eba75a6220bc","Type":"ContainerDied","Data":"55e30b7d3a3e9522f1f5e3e87ccb387732460f31c98ddc169152928e030208b8"} Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.887186 4967 scope.go:117] "RemoveContainer" containerID="569ff6dc54e3b78749c77f978e53e0bd1dda6d502f8381fe4d917ad663d656ce" Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.887376 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c67bffd47-t49fk" Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.894189 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.8941733689999998 podStartE2EDuration="3.894173369s" podCreationTimestamp="2025-11-21 15:56:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:56:46.89179455 +0000 UTC m=+1295.150315578" watchObservedRunningTime="2025-11-21 15:56:46.894173369 +0000 UTC m=+1295.152694387" Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.906065 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" event={"ID":"5527dd0f-b1df-4a90-846b-43dd6319bfa5","Type":"ContainerStarted","Data":"87a6628c2abd3227316091c58c41a52a4c03279bb98e7ff5369006ab99066d03"} Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.906401 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:46 crc kubenswrapper[4967]: I1121 15:56:46.927757 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" podStartSLOduration=3.927740379 podStartE2EDuration="3.927740379s" podCreationTimestamp="2025-11-21 15:56:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:56:46.926220735 +0000 UTC m=+1295.184741743" watchObservedRunningTime="2025-11-21 15:56:46.927740379 +0000 UTC m=+1295.186261387" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.191588 4967 scope.go:117] "RemoveContainer" containerID="c99e39cda58c84136c9ee2399ffe7b86a3c9f66705d7ac9013e3c498414cb656" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.208173 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-t49fk"] Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.237009 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c67bffd47-t49fk"] Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.491974 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.563710 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2decb59c-2387-440c-9a19-203ab519d02b-etc-machine-id\") pod \"2decb59c-2387-440c-9a19-203ab519d02b\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.563870 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2decb59c-2387-440c-9a19-203ab519d02b-logs\") pod \"2decb59c-2387-440c-9a19-203ab519d02b\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.563908 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2decb59c-2387-440c-9a19-203ab519d02b-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "2decb59c-2387-440c-9a19-203ab519d02b" (UID: "2decb59c-2387-440c-9a19-203ab519d02b"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.563932 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-config-data\") pod \"2decb59c-2387-440c-9a19-203ab519d02b\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.564098 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bc85t\" (UniqueName: \"kubernetes.io/projected/2decb59c-2387-440c-9a19-203ab519d02b-kube-api-access-bc85t\") pod \"2decb59c-2387-440c-9a19-203ab519d02b\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.564210 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-config-data-custom\") pod \"2decb59c-2387-440c-9a19-203ab519d02b\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.564268 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-combined-ca-bundle\") pod \"2decb59c-2387-440c-9a19-203ab519d02b\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.564300 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-scripts\") pod \"2decb59c-2387-440c-9a19-203ab519d02b\" (UID: \"2decb59c-2387-440c-9a19-203ab519d02b\") " Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.565235 4967 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2decb59c-2387-440c-9a19-203ab519d02b-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.567092 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2decb59c-2387-440c-9a19-203ab519d02b-logs" (OuterVolumeSpecName: "logs") pod "2decb59c-2387-440c-9a19-203ab519d02b" (UID: "2decb59c-2387-440c-9a19-203ab519d02b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.572169 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2decb59c-2387-440c-9a19-203ab519d02b-kube-api-access-bc85t" (OuterVolumeSpecName: "kube-api-access-bc85t") pod "2decb59c-2387-440c-9a19-203ab519d02b" (UID: "2decb59c-2387-440c-9a19-203ab519d02b"). InnerVolumeSpecName "kube-api-access-bc85t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.574433 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "2decb59c-2387-440c-9a19-203ab519d02b" (UID: "2decb59c-2387-440c-9a19-203ab519d02b"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.574638 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-scripts" (OuterVolumeSpecName: "scripts") pod "2decb59c-2387-440c-9a19-203ab519d02b" (UID: "2decb59c-2387-440c-9a19-203ab519d02b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.605841 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2decb59c-2387-440c-9a19-203ab519d02b" (UID: "2decb59c-2387-440c-9a19-203ab519d02b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.623686 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-config-data" (OuterVolumeSpecName: "config-data") pod "2decb59c-2387-440c-9a19-203ab519d02b" (UID: "2decb59c-2387-440c-9a19-203ab519d02b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.667232 4967 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2decb59c-2387-440c-9a19-203ab519d02b-logs\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.667265 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.667277 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bc85t\" (UniqueName: \"kubernetes.io/projected/2decb59c-2387-440c-9a19-203ab519d02b-kube-api-access-bc85t\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.667289 4967 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.667299 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.667322 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2decb59c-2387-440c-9a19-203ab519d02b-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.922041 4967 generic.go:334] "Generic (PLEG): container finished" podID="2decb59c-2387-440c-9a19-203ab519d02b" containerID="3f3566cce4690e025b75c7523004a9d75f591499707d7f17fbdaba1846e0e318" exitCode=0 Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.922075 4967 generic.go:334] "Generic (PLEG): container finished" podID="2decb59c-2387-440c-9a19-203ab519d02b" containerID="22415c54ee9f0cac0e911bff0ce2f04336c5b01954fe820822d6430bc5f88cfc" exitCode=143 Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.922097 4967 util.go:48] "No ready sandbox for pod can 
be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.922133 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2decb59c-2387-440c-9a19-203ab519d02b","Type":"ContainerDied","Data":"3f3566cce4690e025b75c7523004a9d75f591499707d7f17fbdaba1846e0e318"} Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.922183 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2decb59c-2387-440c-9a19-203ab519d02b","Type":"ContainerDied","Data":"22415c54ee9f0cac0e911bff0ce2f04336c5b01954fe820822d6430bc5f88cfc"} Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.922199 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2decb59c-2387-440c-9a19-203ab519d02b","Type":"ContainerDied","Data":"7900a3e49a7ba61ccdabe5f3c82940630c46daeb3a9bb6c6edfafb71294de37c"} Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.922219 4967 scope.go:117] "RemoveContainer" containerID="3f3566cce4690e025b75c7523004a9d75f591499707d7f17fbdaba1846e0e318" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.924447 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92","Type":"ContainerStarted","Data":"48812ecedfc0e84bf063bb60b551c2d4eb966005a62cd2248b1b1a54305a527d"} Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.932373 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e5ec60e1-6164-417c-bd54-dba2488c532a","Type":"ContainerStarted","Data":"42cda58e8125226d31e745936b86915184a64bc5b49a6e9322d2439fcfc09688"} Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.950096 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.35166356 podStartE2EDuration="5.950075266s" podCreationTimestamp="2025-11-21 15:56:42 +0000 UTC" firstStartedPulling="2025-11-21 15:56:43.934600608 +0000 UTC m=+1292.193121616" lastFinishedPulling="2025-11-21 15:56:45.533012314 +0000 UTC m=+1293.791533322" observedRunningTime="2025-11-21 15:56:47.949868311 +0000 UTC m=+1296.208389329" watchObservedRunningTime="2025-11-21 15:56:47.950075266 +0000 UTC m=+1296.208596274" Nov 21 15:56:47 crc kubenswrapper[4967]: I1121 15:56:47.988084 4967 scope.go:117] "RemoveContainer" containerID="22415c54ee9f0cac0e911bff0ce2f04336c5b01954fe820822d6430bc5f88cfc" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.002987 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.012451 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.027934 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 21 15:56:48 crc kubenswrapper[4967]: E1121 15:56:48.028459 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bae95bc3-2fe5-4a3e-86d6-eba75a6220bc" containerName="dnsmasq-dns" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.028484 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="bae95bc3-2fe5-4a3e-86d6-eba75a6220bc" containerName="dnsmasq-dns" Nov 21 15:56:48 crc kubenswrapper[4967]: E1121 15:56:48.028516 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2decb59c-2387-440c-9a19-203ab519d02b" 
containerName="cinder-api-log" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.028526 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="2decb59c-2387-440c-9a19-203ab519d02b" containerName="cinder-api-log" Nov 21 15:56:48 crc kubenswrapper[4967]: E1121 15:56:48.028599 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2decb59c-2387-440c-9a19-203ab519d02b" containerName="cinder-api" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.028609 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="2decb59c-2387-440c-9a19-203ab519d02b" containerName="cinder-api" Nov 21 15:56:48 crc kubenswrapper[4967]: E1121 15:56:48.028623 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bae95bc3-2fe5-4a3e-86d6-eba75a6220bc" containerName="init" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.028631 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="bae95bc3-2fe5-4a3e-86d6-eba75a6220bc" containerName="init" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.029004 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="2decb59c-2387-440c-9a19-203ab519d02b" containerName="cinder-api-log" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.029037 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="2decb59c-2387-440c-9a19-203ab519d02b" containerName="cinder-api" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.029070 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="bae95bc3-2fe5-4a3e-86d6-eba75a6220bc" containerName="dnsmasq-dns" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.030625 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.036069 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.036291 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.036441 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.038137 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.057839 4967 scope.go:117] "RemoveContainer" containerID="3f3566cce4690e025b75c7523004a9d75f591499707d7f17fbdaba1846e0e318" Nov 21 15:56:48 crc kubenswrapper[4967]: E1121 15:56:48.059479 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f3566cce4690e025b75c7523004a9d75f591499707d7f17fbdaba1846e0e318\": container with ID starting with 3f3566cce4690e025b75c7523004a9d75f591499707d7f17fbdaba1846e0e318 not found: ID does not exist" containerID="3f3566cce4690e025b75c7523004a9d75f591499707d7f17fbdaba1846e0e318" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.059532 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f3566cce4690e025b75c7523004a9d75f591499707d7f17fbdaba1846e0e318"} err="failed to get container status \"3f3566cce4690e025b75c7523004a9d75f591499707d7f17fbdaba1846e0e318\": rpc error: code = NotFound desc = could not find container \"3f3566cce4690e025b75c7523004a9d75f591499707d7f17fbdaba1846e0e318\": container with ID starting with 
3f3566cce4690e025b75c7523004a9d75f591499707d7f17fbdaba1846e0e318 not found: ID does not exist" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.059567 4967 scope.go:117] "RemoveContainer" containerID="22415c54ee9f0cac0e911bff0ce2f04336c5b01954fe820822d6430bc5f88cfc" Nov 21 15:56:48 crc kubenswrapper[4967]: E1121 15:56:48.059886 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22415c54ee9f0cac0e911bff0ce2f04336c5b01954fe820822d6430bc5f88cfc\": container with ID starting with 22415c54ee9f0cac0e911bff0ce2f04336c5b01954fe820822d6430bc5f88cfc not found: ID does not exist" containerID="22415c54ee9f0cac0e911bff0ce2f04336c5b01954fe820822d6430bc5f88cfc" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.059913 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22415c54ee9f0cac0e911bff0ce2f04336c5b01954fe820822d6430bc5f88cfc"} err="failed to get container status \"22415c54ee9f0cac0e911bff0ce2f04336c5b01954fe820822d6430bc5f88cfc\": rpc error: code = NotFound desc = could not find container \"22415c54ee9f0cac0e911bff0ce2f04336c5b01954fe820822d6430bc5f88cfc\": container with ID starting with 22415c54ee9f0cac0e911bff0ce2f04336c5b01954fe820822d6430bc5f88cfc not found: ID does not exist" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.059934 4967 scope.go:117] "RemoveContainer" containerID="3f3566cce4690e025b75c7523004a9d75f591499707d7f17fbdaba1846e0e318" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.060147 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f3566cce4690e025b75c7523004a9d75f591499707d7f17fbdaba1846e0e318"} err="failed to get container status \"3f3566cce4690e025b75c7523004a9d75f591499707d7f17fbdaba1846e0e318\": rpc error: code = NotFound desc = could not find container \"3f3566cce4690e025b75c7523004a9d75f591499707d7f17fbdaba1846e0e318\": container with ID starting with 3f3566cce4690e025b75c7523004a9d75f591499707d7f17fbdaba1846e0e318 not found: ID does not exist" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.060167 4967 scope.go:117] "RemoveContainer" containerID="22415c54ee9f0cac0e911bff0ce2f04336c5b01954fe820822d6430bc5f88cfc" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.060360 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22415c54ee9f0cac0e911bff0ce2f04336c5b01954fe820822d6430bc5f88cfc"} err="failed to get container status \"22415c54ee9f0cac0e911bff0ce2f04336c5b01954fe820822d6430bc5f88cfc\": rpc error: code = NotFound desc = could not find container \"22415c54ee9f0cac0e911bff0ce2f04336c5b01954fe820822d6430bc5f88cfc\": container with ID starting with 22415c54ee9f0cac0e911bff0ce2f04336c5b01954fe820822d6430bc5f88cfc not found: ID does not exist" Nov 21 15:56:48 crc kubenswrapper[4967]: E1121 15:56:48.141149 4967 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2decb59c_2387_440c_9a19_203ab519d02b.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2decb59c_2387_440c_9a19_203ab519d02b.slice/crio-7900a3e49a7ba61ccdabe5f3c82940630c46daeb3a9bb6c6edfafb71294de37c\": RecentStats: unable to find data in memory cache]" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.178901 4967 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57e505d7-beb3-43ca-a03d-c5ae00347bc0-scripts\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.178981 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57e505d7-beb3-43ca-a03d-c5ae00347bc0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.179000 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57e505d7-beb3-43ca-a03d-c5ae00347bc0-config-data\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.179202 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/57e505d7-beb3-43ca-a03d-c5ae00347bc0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.179332 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57e505d7-beb3-43ca-a03d-c5ae00347bc0-config-data-custom\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.179390 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6r5pc\" (UniqueName: \"kubernetes.io/projected/57e505d7-beb3-43ca-a03d-c5ae00347bc0-kube-api-access-6r5pc\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.179417 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/57e505d7-beb3-43ca-a03d-c5ae00347bc0-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.179477 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57e505d7-beb3-43ca-a03d-c5ae00347bc0-logs\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.179502 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/57e505d7-beb3-43ca-a03d-c5ae00347bc0-public-tls-certs\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.281548 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57e505d7-beb3-43ca-a03d-c5ae00347bc0-logs\") pod \"cinder-api-0\" (UID: 
\"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.281602 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/57e505d7-beb3-43ca-a03d-c5ae00347bc0-public-tls-certs\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.281666 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57e505d7-beb3-43ca-a03d-c5ae00347bc0-scripts\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.281709 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57e505d7-beb3-43ca-a03d-c5ae00347bc0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.281731 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57e505d7-beb3-43ca-a03d-c5ae00347bc0-config-data\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.281776 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/57e505d7-beb3-43ca-a03d-c5ae00347bc0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.281800 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57e505d7-beb3-43ca-a03d-c5ae00347bc0-config-data-custom\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.281834 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6r5pc\" (UniqueName: \"kubernetes.io/projected/57e505d7-beb3-43ca-a03d-c5ae00347bc0-kube-api-access-6r5pc\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.281862 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/57e505d7-beb3-43ca-a03d-c5ae00347bc0-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.284565 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/57e505d7-beb3-43ca-a03d-c5ae00347bc0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.284703 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57e505d7-beb3-43ca-a03d-c5ae00347bc0-logs\") pod \"cinder-api-0\" (UID: 
\"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.287230 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57e505d7-beb3-43ca-a03d-c5ae00347bc0-scripts\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.287460 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57e505d7-beb3-43ca-a03d-c5ae00347bc0-config-data-custom\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.289143 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57e505d7-beb3-43ca-a03d-c5ae00347bc0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.292061 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-68456f646b-s7b7f" podUID="a0d4a6bc-2860-495d-8fa4-76668ddcbcec" containerName="barbican-api-log" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.295632 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/57e505d7-beb3-43ca-a03d-c5ae00347bc0-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.300382 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57e505d7-beb3-43ca-a03d-c5ae00347bc0-config-data\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.300920 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6r5pc\" (UniqueName: \"kubernetes.io/projected/57e505d7-beb3-43ca-a03d-c5ae00347bc0-kube-api-access-6r5pc\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.300989 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.305202 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/57e505d7-beb3-43ca-a03d-c5ae00347bc0-public-tls-certs\") pod \"cinder-api-0\" (UID: \"57e505d7-beb3-43ca-a03d-c5ae00347bc0\") " pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.351754 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.554130 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2decb59c-2387-440c-9a19-203ab519d02b" path="/var/lib/kubelet/pods/2decb59c-2387-440c-9a19-203ab519d02b/volumes" Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.555459 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bae95bc3-2fe5-4a3e-86d6-eba75a6220bc" path="/var/lib/kubelet/pods/bae95bc3-2fe5-4a3e-86d6-eba75a6220bc/volumes" Nov 21 15:56:48 crc kubenswrapper[4967]: W1121 15:56:48.823436 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod57e505d7_beb3_43ca_a03d_c5ae00347bc0.slice/crio-efa71a7430828241f81d92e78296606b11bed8859a421ad046418b20076f8ab1 WatchSource:0}: Error finding container efa71a7430828241f81d92e78296606b11bed8859a421ad046418b20076f8ab1: Status 404 returned error can't find the container with id efa71a7430828241f81d92e78296606b11bed8859a421ad046418b20076f8ab1 Nov 21 15:56:48 crc kubenswrapper[4967]: I1121 15:56:48.838909 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 21 15:56:49 crc kubenswrapper[4967]: I1121 15:56:49.025788 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e5ec60e1-6164-417c-bd54-dba2488c532a","Type":"ContainerStarted","Data":"5ff7134e6aee4fbea9effdf90993ee33422c1178643d8badd53a1301364d5b9b"} Nov 21 15:56:49 crc kubenswrapper[4967]: I1121 15:56:49.034560 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"57e505d7-beb3-43ca-a03d-c5ae00347bc0","Type":"ContainerStarted","Data":"efa71a7430828241f81d92e78296606b11bed8859a421ad046418b20076f8ab1"} Nov 21 15:56:50 crc kubenswrapper[4967]: I1121 15:56:50.067565 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"57e505d7-beb3-43ca-a03d-c5ae00347bc0","Type":"ContainerStarted","Data":"7df72739eb480e816646fc063df5f899002ecd7065d8d6280c128e196d870f94"} Nov 21 15:56:51 crc kubenswrapper[4967]: I1121 15:56:51.096755 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"57e505d7-beb3-43ca-a03d-c5ae00347bc0","Type":"ContainerStarted","Data":"1384a325e8566c389b8dfd4793acce8a71327947cfd57ac52136b42f568d5d64"} Nov 21 15:56:51 crc kubenswrapper[4967]: I1121 15:56:51.097251 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 21 15:56:51 crc kubenswrapper[4967]: I1121 15:56:51.139684 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.13964075 podStartE2EDuration="4.13964075s" podCreationTimestamp="2025-11-21 15:56:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:56:51.122232127 +0000 UTC m=+1299.380753135" watchObservedRunningTime="2025-11-21 15:56:51.13964075 +0000 UTC m=+1299.398161768" Nov 21 15:56:51 crc kubenswrapper[4967]: I1121 15:56:51.223329 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:51 crc kubenswrapper[4967]: I1121 15:56:51.302198 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:52 crc 
kubenswrapper[4967]: I1121 15:56:52.109208 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e5ec60e1-6164-417c-bd54-dba2488c532a","Type":"ContainerStarted","Data":"020bef81047c8008782c5042de065aabb0805f186829f78f37a67754b692a572"} Nov 21 15:56:52 crc kubenswrapper[4967]: I1121 15:56:52.129372 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.880368447 podStartE2EDuration="9.129356776s" podCreationTimestamp="2025-11-21 15:56:43 +0000 UTC" firstStartedPulling="2025-11-21 15:56:44.882377082 +0000 UTC m=+1293.140898090" lastFinishedPulling="2025-11-21 15:56:51.131365411 +0000 UTC m=+1299.389886419" observedRunningTime="2025-11-21 15:56:52.128081939 +0000 UTC m=+1300.386602947" watchObservedRunningTime="2025-11-21 15:56:52.129356776 +0000 UTC m=+1300.387877774" Nov 21 15:56:53 crc kubenswrapper[4967]: I1121 15:56:53.127338 4967 generic.go:334] "Generic (PLEG): container finished" podID="db63398d-117e-4a60-b548-e1684dbef263" containerID="5a68a1617fd748c8f26d252405e183a72786bedb5ce9ad831364ec556cf7f9c8" exitCode=0 Nov 21 15:56:53 crc kubenswrapper[4967]: I1121 15:56:53.127456 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-tcdk8" event={"ID":"db63398d-117e-4a60-b548-e1684dbef263","Type":"ContainerDied","Data":"5a68a1617fd748c8f26d252405e183a72786bedb5ce9ad831364ec556cf7f9c8"} Nov 21 15:56:53 crc kubenswrapper[4967]: I1121 15:56:53.128230 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 15:56:53 crc kubenswrapper[4967]: I1121 15:56:53.504447 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:56:53 crc kubenswrapper[4967]: I1121 15:56:53.618507 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-blfrj"] Nov 21 15:56:53 crc kubenswrapper[4967]: I1121 15:56:53.619078 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" podUID="036b7ea7-f134-4986-a4ae-ca8725f40ee6" containerName="dnsmasq-dns" containerID="cri-o://4b2594431cca1d7af0cbb78911696cae035b16c57828192ffb2f4c4c8accf0f0" gracePeriod=10 Nov 21 15:56:53 crc kubenswrapper[4967]: I1121 15:56:53.706993 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:53 crc kubenswrapper[4967]: I1121 15:56:53.816004 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5969866c74-lgff2" Nov 21 15:56:53 crc kubenswrapper[4967]: I1121 15:56:53.824571 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 21 15:56:53 crc kubenswrapper[4967]: I1121 15:56:53.893280 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-68456f646b-s7b7f"] Nov 21 15:56:53 crc kubenswrapper[4967]: I1121 15:56:53.893526 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-68456f646b-s7b7f" podUID="a0d4a6bc-2860-495d-8fa4-76668ddcbcec" containerName="barbican-api-log" containerID="cri-o://e4209bb0fb9ffacd5e770f3fe263b00db4c9df6775c3bc6150c23ab4302d1f08" gracePeriod=30 Nov 21 15:56:53 crc kubenswrapper[4967]: I1121 15:56:53.893973 4967 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/barbican-api-68456f646b-s7b7f" podUID="a0d4a6bc-2860-495d-8fa4-76668ddcbcec" containerName="barbican-api" containerID="cri-o://b7a1cad6be9818a65e19e387824c52a515e65e96296ea132b987ac65a6778cd9" gracePeriod=30 Nov 21 15:56:53 crc kubenswrapper[4967]: I1121 15:56:53.908048 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.141068 4967 generic.go:334] "Generic (PLEG): container finished" podID="a0d4a6bc-2860-495d-8fa4-76668ddcbcec" containerID="e4209bb0fb9ffacd5e770f3fe263b00db4c9df6775c3bc6150c23ab4302d1f08" exitCode=143 Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.141123 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68456f646b-s7b7f" event={"ID":"a0d4a6bc-2860-495d-8fa4-76668ddcbcec","Type":"ContainerDied","Data":"e4209bb0fb9ffacd5e770f3fe263b00db4c9df6775c3bc6150c23ab4302d1f08"} Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.143983 4967 generic.go:334] "Generic (PLEG): container finished" podID="036b7ea7-f134-4986-a4ae-ca8725f40ee6" containerID="4b2594431cca1d7af0cbb78911696cae035b16c57828192ffb2f4c4c8accf0f0" exitCode=0 Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.144019 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" event={"ID":"036b7ea7-f134-4986-a4ae-ca8725f40ee6","Type":"ContainerDied","Data":"4b2594431cca1d7af0cbb78911696cae035b16c57828192ffb2f4c4c8accf0f0"} Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.145384 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="83e7fc7b-7040-4fab-a499-fb9bf9c0fa92" containerName="cinder-scheduler" containerID="cri-o://6e7f14a0cd1f82e6eb7173240eba239b9bcd3aea7c98e320171fbe2c72645fe8" gracePeriod=30 Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.145505 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="83e7fc7b-7040-4fab-a499-fb9bf9c0fa92" containerName="probe" containerID="cri-o://48812ecedfc0e84bf063bb60b551c2d4eb966005a62cd2248b1b1a54305a527d" gracePeriod=30 Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.628408 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-tcdk8" Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.723450 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.742990 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/db63398d-117e-4a60-b548-e1684dbef263-config\") pod \"db63398d-117e-4a60-b548-e1684dbef263\" (UID: \"db63398d-117e-4a60-b548-e1684dbef263\") " Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.743062 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db63398d-117e-4a60-b548-e1684dbef263-combined-ca-bundle\") pod \"db63398d-117e-4a60-b548-e1684dbef263\" (UID: \"db63398d-117e-4a60-b548-e1684dbef263\") " Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.743185 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d8d95\" (UniqueName: \"kubernetes.io/projected/db63398d-117e-4a60-b548-e1684dbef263-kube-api-access-d8d95\") pod \"db63398d-117e-4a60-b548-e1684dbef263\" (UID: \"db63398d-117e-4a60-b548-e1684dbef263\") " Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.753642 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db63398d-117e-4a60-b548-e1684dbef263-kube-api-access-d8d95" (OuterVolumeSpecName: "kube-api-access-d8d95") pod "db63398d-117e-4a60-b548-e1684dbef263" (UID: "db63398d-117e-4a60-b548-e1684dbef263"). InnerVolumeSpecName "kube-api-access-d8d95". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.803999 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db63398d-117e-4a60-b548-e1684dbef263-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "db63398d-117e-4a60-b548-e1684dbef263" (UID: "db63398d-117e-4a60-b548-e1684dbef263"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.804951 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db63398d-117e-4a60-b548-e1684dbef263-config" (OuterVolumeSpecName: "config") pod "db63398d-117e-4a60-b548-e1684dbef263" (UID: "db63398d-117e-4a60-b548-e1684dbef263"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.845401 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-config\") pod \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.845473 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-ovsdbserver-sb\") pod \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.845495 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5mwj6\" (UniqueName: \"kubernetes.io/projected/036b7ea7-f134-4986-a4ae-ca8725f40ee6-kube-api-access-5mwj6\") pod \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.845543 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-ovsdbserver-nb\") pod \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.845922 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-dns-svc\") pod \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.845956 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-dns-swift-storage-0\") pod \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\" (UID: \"036b7ea7-f134-4986-a4ae-ca8725f40ee6\") " Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.846550 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d8d95\" (UniqueName: \"kubernetes.io/projected/db63398d-117e-4a60-b548-e1684dbef263-kube-api-access-d8d95\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.846566 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/db63398d-117e-4a60-b548-e1684dbef263-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.846575 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db63398d-117e-4a60-b548-e1684dbef263-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.855646 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/036b7ea7-f134-4986-a4ae-ca8725f40ee6-kube-api-access-5mwj6" (OuterVolumeSpecName: "kube-api-access-5mwj6") pod "036b7ea7-f134-4986-a4ae-ca8725f40ee6" (UID: "036b7ea7-f134-4986-a4ae-ca8725f40ee6"). InnerVolumeSpecName "kube-api-access-5mwj6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.905286 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "036b7ea7-f134-4986-a4ae-ca8725f40ee6" (UID: "036b7ea7-f134-4986-a4ae-ca8725f40ee6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.911712 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-config" (OuterVolumeSpecName: "config") pod "036b7ea7-f134-4986-a4ae-ca8725f40ee6" (UID: "036b7ea7-f134-4986-a4ae-ca8725f40ee6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.914548 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "036b7ea7-f134-4986-a4ae-ca8725f40ee6" (UID: "036b7ea7-f134-4986-a4ae-ca8725f40ee6"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.917562 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "036b7ea7-f134-4986-a4ae-ca8725f40ee6" (UID: "036b7ea7-f134-4986-a4ae-ca8725f40ee6"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.918150 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "036b7ea7-f134-4986-a4ae-ca8725f40ee6" (UID: "036b7ea7-f134-4986-a4ae-ca8725f40ee6"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.948808 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.948997 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5mwj6\" (UniqueName: \"kubernetes.io/projected/036b7ea7-f134-4986-a4ae-ca8725f40ee6-kube-api-access-5mwj6\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.949061 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.949115 4967 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.949175 4967 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:54 crc kubenswrapper[4967]: I1121 15:56:54.949232 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/036b7ea7-f134-4986-a4ae-ca8725f40ee6-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.160898 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" event={"ID":"036b7ea7-f134-4986-a4ae-ca8725f40ee6","Type":"ContainerDied","Data":"484f4c305cf608eb1f21d145bf5412abb972e5d29fb680b2945afe7c459bed4f"} Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.162135 4967 scope.go:117] "RemoveContainer" containerID="4b2594431cca1d7af0cbb78911696cae035b16c57828192ffb2f4c4c8accf0f0" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.162434 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-blfrj" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.169432 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-tcdk8" event={"ID":"db63398d-117e-4a60-b548-e1684dbef263","Type":"ContainerDied","Data":"b07b691626b627134916d82d90444b4cd84fa91c70bdb709081a35f93e031976"} Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.169484 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b07b691626b627134916d82d90444b4cd84fa91c70bdb709081a35f93e031976" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.169557 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-tcdk8" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.195539 4967 generic.go:334] "Generic (PLEG): container finished" podID="83e7fc7b-7040-4fab-a499-fb9bf9c0fa92" containerID="48812ecedfc0e84bf063bb60b551c2d4eb966005a62cd2248b1b1a54305a527d" exitCode=0 Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.195678 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92","Type":"ContainerDied","Data":"48812ecedfc0e84bf063bb60b551c2d4eb966005a62cd2248b1b1a54305a527d"} Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.236223 4967 scope.go:117] "RemoveContainer" containerID="78d254d881303628bb2863105ae549ca19b43743bcbd3024526af923092b9aed" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.237422 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-blfrj"] Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.280731 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-blfrj"] Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.337339 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-n4kt7"] Nov 21 15:56:55 crc kubenswrapper[4967]: E1121 15:56:55.337932 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="036b7ea7-f134-4986-a4ae-ca8725f40ee6" containerName="init" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.337954 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="036b7ea7-f134-4986-a4ae-ca8725f40ee6" containerName="init" Nov 21 15:56:55 crc kubenswrapper[4967]: E1121 15:56:55.337991 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db63398d-117e-4a60-b548-e1684dbef263" containerName="neutron-db-sync" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.338001 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="db63398d-117e-4a60-b548-e1684dbef263" containerName="neutron-db-sync" Nov 21 15:56:55 crc kubenswrapper[4967]: E1121 15:56:55.338034 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="036b7ea7-f134-4986-a4ae-ca8725f40ee6" containerName="dnsmasq-dns" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.338044 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="036b7ea7-f134-4986-a4ae-ca8725f40ee6" containerName="dnsmasq-dns" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.338300 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="db63398d-117e-4a60-b548-e1684dbef263" containerName="neutron-db-sync" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.338349 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="036b7ea7-f134-4986-a4ae-ca8725f40ee6" containerName="dnsmasq-dns" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.339843 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.371423 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-n4kt7"] Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.461481 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-config\") pod \"dnsmasq-dns-6578955fd5-n4kt7\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.461549 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6tq9\" (UniqueName: \"kubernetes.io/projected/12732ea1-9536-4f66-8a18-14aec233a88a-kube-api-access-l6tq9\") pod \"dnsmasq-dns-6578955fd5-n4kt7\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.461629 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-n4kt7\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.461656 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-dns-svc\") pod \"dnsmasq-dns-6578955fd5-n4kt7\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.461725 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-n4kt7\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.461819 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-n4kt7\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.475436 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5859ff54bd-hmqvr"] Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.479218 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.506981 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5859ff54bd-hmqvr"] Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.509929 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.510028 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.510486 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.511724 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-rp7j4" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.563456 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-config\") pod \"dnsmasq-dns-6578955fd5-n4kt7\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.563532 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6tq9\" (UniqueName: \"kubernetes.io/projected/12732ea1-9536-4f66-8a18-14aec233a88a-kube-api-access-l6tq9\") pod \"dnsmasq-dns-6578955fd5-n4kt7\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.563596 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srjwm\" (UniqueName: \"kubernetes.io/projected/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-kube-api-access-srjwm\") pod \"neutron-5859ff54bd-hmqvr\" (UID: \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") " pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.563648 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-config\") pod \"neutron-5859ff54bd-hmqvr\" (UID: \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") " pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.563689 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-n4kt7\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.563710 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-dns-svc\") pod \"dnsmasq-dns-6578955fd5-n4kt7\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.563732 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-httpd-config\") pod \"neutron-5859ff54bd-hmqvr\" (UID: 
\"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") " pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.563790 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-n4kt7\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.563825 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-combined-ca-bundle\") pod \"neutron-5859ff54bd-hmqvr\" (UID: \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") " pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.563884 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-n4kt7\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.564746 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-n4kt7\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.565101 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-ovndb-tls-certs\") pod \"neutron-5859ff54bd-hmqvr\" (UID: \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") " pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.565985 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-dns-svc\") pod \"dnsmasq-dns-6578955fd5-n4kt7\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.566771 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-config\") pod \"dnsmasq-dns-6578955fd5-n4kt7\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.568541 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-n4kt7\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.570810 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-n4kt7\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " 
pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.602533 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6tq9\" (UniqueName: \"kubernetes.io/projected/12732ea1-9536-4f66-8a18-14aec233a88a-kube-api-access-l6tq9\") pod \"dnsmasq-dns-6578955fd5-n4kt7\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.667368 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.673516 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-combined-ca-bundle\") pod \"neutron-5859ff54bd-hmqvr\" (UID: \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") " pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.673639 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-ovndb-tls-certs\") pod \"neutron-5859ff54bd-hmqvr\" (UID: \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") " pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.673755 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srjwm\" (UniqueName: \"kubernetes.io/projected/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-kube-api-access-srjwm\") pod \"neutron-5859ff54bd-hmqvr\" (UID: \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") " pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.673784 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-config\") pod \"neutron-5859ff54bd-hmqvr\" (UID: \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") " pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.673838 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-httpd-config\") pod \"neutron-5859ff54bd-hmqvr\" (UID: \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") " pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.680592 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-combined-ca-bundle\") pod \"neutron-5859ff54bd-hmqvr\" (UID: \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") " pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.683081 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-httpd-config\") pod \"neutron-5859ff54bd-hmqvr\" (UID: \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") " pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.687001 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-ovndb-tls-certs\") pod \"neutron-5859ff54bd-hmqvr\" (UID: 
\"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") " pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.693544 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-config\") pod \"neutron-5859ff54bd-hmqvr\" (UID: \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") " pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.703404 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srjwm\" (UniqueName: \"kubernetes.io/projected/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-kube-api-access-srjwm\") pod \"neutron-5859ff54bd-hmqvr\" (UID: \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") " pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:56:55 crc kubenswrapper[4967]: I1121 15:56:55.858202 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.223550 4967 generic.go:334] "Generic (PLEG): container finished" podID="83e7fc7b-7040-4fab-a499-fb9bf9c0fa92" containerID="6e7f14a0cd1f82e6eb7173240eba239b9bcd3aea7c98e320171fbe2c72645fe8" exitCode=0 Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.223753 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92","Type":"ContainerDied","Data":"6e7f14a0cd1f82e6eb7173240eba239b9bcd3aea7c98e320171fbe2c72645fe8"} Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.223976 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92","Type":"ContainerDied","Data":"9bec053044f4c503ee48c0810bd74cd46b96f99e8c74e2fe61edb558a253ab56"} Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.224002 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9bec053044f4c503ee48c0810bd74cd46b96f99e8c74e2fe61edb558a253ab56" Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.236886 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.286583 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-combined-ca-bundle\") pod \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.286628 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-config-data\") pod \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.286696 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-etc-machine-id\") pod \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.286783 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-scripts\") pod \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.286833 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dldwn\" (UniqueName: \"kubernetes.io/projected/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-kube-api-access-dldwn\") pod \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.286908 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-config-data-custom\") pod \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\" (UID: \"83e7fc7b-7040-4fab-a499-fb9bf9c0fa92\") " Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.287562 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "83e7fc7b-7040-4fab-a499-fb9bf9c0fa92" (UID: "83e7fc7b-7040-4fab-a499-fb9bf9c0fa92"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.306479 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-scripts" (OuterVolumeSpecName: "scripts") pod "83e7fc7b-7040-4fab-a499-fb9bf9c0fa92" (UID: "83e7fc7b-7040-4fab-a499-fb9bf9c0fa92"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.306662 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-kube-api-access-dldwn" (OuterVolumeSpecName: "kube-api-access-dldwn") pod "83e7fc7b-7040-4fab-a499-fb9bf9c0fa92" (UID: "83e7fc7b-7040-4fab-a499-fb9bf9c0fa92"). InnerVolumeSpecName "kube-api-access-dldwn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.315026 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "83e7fc7b-7040-4fab-a499-fb9bf9c0fa92" (UID: "83e7fc7b-7040-4fab-a499-fb9bf9c0fa92"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.360660 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-n4kt7"] Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.390057 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dldwn\" (UniqueName: \"kubernetes.io/projected/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-kube-api-access-dldwn\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.390100 4967 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.390112 4967 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.390124 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.451424 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "83e7fc7b-7040-4fab-a499-fb9bf9c0fa92" (UID: "83e7fc7b-7040-4fab-a499-fb9bf9c0fa92"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.493926 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.531451 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-config-data" (OuterVolumeSpecName: "config-data") pod "83e7fc7b-7040-4fab-a499-fb9bf9c0fa92" (UID: "83e7fc7b-7040-4fab-a499-fb9bf9c0fa92"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.556028 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="036b7ea7-f134-4986-a4ae-ca8725f40ee6" path="/var/lib/kubelet/pods/036b7ea7-f134-4986-a4ae-ca8725f40ee6/volumes" Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.596615 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:56 crc kubenswrapper[4967]: I1121 15:56:56.620916 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5859ff54bd-hmqvr"] Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.083221 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-68456f646b-s7b7f" podUID="a0d4a6bc-2860-495d-8fa4-76668ddcbcec" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.195:9311/healthcheck\": read tcp 10.217.0.2:47216->10.217.0.195:9311: read: connection reset by peer" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.083806 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-68456f646b-s7b7f" podUID="a0d4a6bc-2860-495d-8fa4-76668ddcbcec" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.195:9311/healthcheck\": read tcp 10.217.0.2:47206->10.217.0.195:9311: read: connection reset by peer" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.244950 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5859ff54bd-hmqvr" event={"ID":"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff","Type":"ContainerStarted","Data":"b70af11e3e863da51b383110c505b812605a8d1ed939cad3719295564dd19018"} Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.244991 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5859ff54bd-hmqvr" event={"ID":"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff","Type":"ContainerStarted","Data":"b73de64291624c449e3db90ffbad63a6feea0d94ddefc402ece2357ac9267b84"} Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.249062 4967 generic.go:334] "Generic (PLEG): container finished" podID="a0d4a6bc-2860-495d-8fa4-76668ddcbcec" containerID="b7a1cad6be9818a65e19e387824c52a515e65e96296ea132b987ac65a6778cd9" exitCode=0 Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.249155 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68456f646b-s7b7f" event={"ID":"a0d4a6bc-2860-495d-8fa4-76668ddcbcec","Type":"ContainerDied","Data":"b7a1cad6be9818a65e19e387824c52a515e65e96296ea132b987ac65a6778cd9"} Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.251616 4967 generic.go:334] "Generic (PLEG): container finished" podID="12732ea1-9536-4f66-8a18-14aec233a88a" containerID="1c26fb594847a70a2ad68de6745a6cfd994ef1dd03d24b73ff20f9fc64b12ba8" exitCode=0 Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.251687 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" event={"ID":"12732ea1-9536-4f66-8a18-14aec233a88a","Type":"ContainerDied","Data":"1c26fb594847a70a2ad68de6745a6cfd994ef1dd03d24b73ff20f9fc64b12ba8"} Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.251707 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.251732 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" event={"ID":"12732ea1-9536-4f66-8a18-14aec233a88a","Type":"ContainerStarted","Data":"e1daaee4164a288422207cf90b306d25e3b9b0ff968b0bb873cb29838f724d5f"} Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.383705 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.399036 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.424436 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 15:56:57 crc kubenswrapper[4967]: E1121 15:56:57.424929 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83e7fc7b-7040-4fab-a499-fb9bf9c0fa92" containerName="cinder-scheduler" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.424942 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="83e7fc7b-7040-4fab-a499-fb9bf9c0fa92" containerName="cinder-scheduler" Nov 21 15:56:57 crc kubenswrapper[4967]: E1121 15:56:57.424989 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83e7fc7b-7040-4fab-a499-fb9bf9c0fa92" containerName="probe" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.424997 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="83e7fc7b-7040-4fab-a499-fb9bf9c0fa92" containerName="probe" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.425250 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="83e7fc7b-7040-4fab-a499-fb9bf9c0fa92" containerName="cinder-scheduler" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.425327 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="83e7fc7b-7040-4fab-a499-fb9bf9c0fa92" containerName="probe" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.426554 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.432065 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.438561 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.537656 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.625774 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c76f304-d7ff-4488-bf08-228d143dae3d-config-data\") pod \"cinder-scheduler-0\" (UID: \"2c76f304-d7ff-4488-bf08-228d143dae3d\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.626173 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2c76f304-d7ff-4488-bf08-228d143dae3d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2c76f304-d7ff-4488-bf08-228d143dae3d\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.626240 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c76f304-d7ff-4488-bf08-228d143dae3d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"2c76f304-d7ff-4488-bf08-228d143dae3d\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.626300 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2c76f304-d7ff-4488-bf08-228d143dae3d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2c76f304-d7ff-4488-bf08-228d143dae3d\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.626347 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rf2ks\" (UniqueName: \"kubernetes.io/projected/2c76f304-d7ff-4488-bf08-228d143dae3d-kube-api-access-rf2ks\") pod \"cinder-scheduler-0\" (UID: \"2c76f304-d7ff-4488-bf08-228d143dae3d\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.626386 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c76f304-d7ff-4488-bf08-228d143dae3d-scripts\") pod \"cinder-scheduler-0\" (UID: \"2c76f304-d7ff-4488-bf08-228d143dae3d\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.728049 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-combined-ca-bundle\") pod \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.728331 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-config-data\") pod \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.728577 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-config-data-custom\") pod \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.728816 4967 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-logs\") pod \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.728919 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qdm42\" (UniqueName: \"kubernetes.io/projected/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-kube-api-access-qdm42\") pod \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\" (UID: \"a0d4a6bc-2860-495d-8fa4-76668ddcbcec\") " Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.729263 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-logs" (OuterVolumeSpecName: "logs") pod "a0d4a6bc-2860-495d-8fa4-76668ddcbcec" (UID: "a0d4a6bc-2860-495d-8fa4-76668ddcbcec"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.729596 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c76f304-d7ff-4488-bf08-228d143dae3d-config-data\") pod \"cinder-scheduler-0\" (UID: \"2c76f304-d7ff-4488-bf08-228d143dae3d\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.729750 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2c76f304-d7ff-4488-bf08-228d143dae3d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2c76f304-d7ff-4488-bf08-228d143dae3d\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.729918 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c76f304-d7ff-4488-bf08-228d143dae3d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"2c76f304-d7ff-4488-bf08-228d143dae3d\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.730037 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2c76f304-d7ff-4488-bf08-228d143dae3d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2c76f304-d7ff-4488-bf08-228d143dae3d\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.730114 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rf2ks\" (UniqueName: \"kubernetes.io/projected/2c76f304-d7ff-4488-bf08-228d143dae3d-kube-api-access-rf2ks\") pod \"cinder-scheduler-0\" (UID: \"2c76f304-d7ff-4488-bf08-228d143dae3d\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.730262 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c76f304-d7ff-4488-bf08-228d143dae3d-scripts\") pod \"cinder-scheduler-0\" (UID: \"2c76f304-d7ff-4488-bf08-228d143dae3d\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.730925 4967 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-logs\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.733423 4967 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2c76f304-d7ff-4488-bf08-228d143dae3d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2c76f304-d7ff-4488-bf08-228d143dae3d\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.742136 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2c76f304-d7ff-4488-bf08-228d143dae3d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2c76f304-d7ff-4488-bf08-228d143dae3d\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.743945 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a0d4a6bc-2860-495d-8fa4-76668ddcbcec" (UID: "a0d4a6bc-2860-495d-8fa4-76668ddcbcec"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.744040 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c76f304-d7ff-4488-bf08-228d143dae3d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"2c76f304-d7ff-4488-bf08-228d143dae3d\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.744245 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-kube-api-access-qdm42" (OuterVolumeSpecName: "kube-api-access-qdm42") pod "a0d4a6bc-2860-495d-8fa4-76668ddcbcec" (UID: "a0d4a6bc-2860-495d-8fa4-76668ddcbcec"). InnerVolumeSpecName "kube-api-access-qdm42". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.756222 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c76f304-d7ff-4488-bf08-228d143dae3d-config-data\") pod \"cinder-scheduler-0\" (UID: \"2c76f304-d7ff-4488-bf08-228d143dae3d\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.759780 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c76f304-d7ff-4488-bf08-228d143dae3d-scripts\") pod \"cinder-scheduler-0\" (UID: \"2c76f304-d7ff-4488-bf08-228d143dae3d\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.765072 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rf2ks\" (UniqueName: \"kubernetes.io/projected/2c76f304-d7ff-4488-bf08-228d143dae3d-kube-api-access-rf2ks\") pod \"cinder-scheduler-0\" (UID: \"2c76f304-d7ff-4488-bf08-228d143dae3d\") " pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.785838 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.808945 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a0d4a6bc-2860-495d-8fa4-76668ddcbcec" (UID: "a0d4a6bc-2860-495d-8fa4-76668ddcbcec"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.868454 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.868509 4967 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.868527 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qdm42\" (UniqueName: \"kubernetes.io/projected/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-kube-api-access-qdm42\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.879678 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5f8c4b98b5-tmhs4"] Nov 21 15:56:57 crc kubenswrapper[4967]: E1121 15:56:57.880758 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0d4a6bc-2860-495d-8fa4-76668ddcbcec" containerName="barbican-api-log" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.880778 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0d4a6bc-2860-495d-8fa4-76668ddcbcec" containerName="barbican-api-log" Nov 21 15:56:57 crc kubenswrapper[4967]: E1121 15:56:57.880807 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0d4a6bc-2860-495d-8fa4-76668ddcbcec" containerName="barbican-api" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.880814 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0d4a6bc-2860-495d-8fa4-76668ddcbcec" containerName="barbican-api" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.881260 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0d4a6bc-2860-495d-8fa4-76668ddcbcec" containerName="barbican-api-log" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.881292 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0d4a6bc-2860-495d-8fa4-76668ddcbcec" containerName="barbican-api" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.884470 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.888551 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.890357 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.923377 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5f8c4b98b5-tmhs4"] Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.934562 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-config-data" (OuterVolumeSpecName: "config-data") pod "a0d4a6bc-2860-495d-8fa4-76668ddcbcec" (UID: "a0d4a6bc-2860-495d-8fa4-76668ddcbcec"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.979071 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/44bed227-df87-4bda-8b89-2d54dc5735a4-internal-tls-certs\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.979183 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/44bed227-df87-4bda-8b89-2d54dc5735a4-config\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.979207 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/44bed227-df87-4bda-8b89-2d54dc5735a4-httpd-config\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.979260 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v86lc\" (UniqueName: \"kubernetes.io/projected/44bed227-df87-4bda-8b89-2d54dc5735a4-kube-api-access-v86lc\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.979369 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/44bed227-df87-4bda-8b89-2d54dc5735a4-public-tls-certs\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.979428 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44bed227-df87-4bda-8b89-2d54dc5735a4-combined-ca-bundle\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.979481 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/44bed227-df87-4bda-8b89-2d54dc5735a4-ovndb-tls-certs\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:57 crc kubenswrapper[4967]: I1121 15:56:57.979568 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0d4a6bc-2860-495d-8fa4-76668ddcbcec-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.082262 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/44bed227-df87-4bda-8b89-2d54dc5735a4-config\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.082346 4967 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/44bed227-df87-4bda-8b89-2d54dc5735a4-httpd-config\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.082417 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v86lc\" (UniqueName: \"kubernetes.io/projected/44bed227-df87-4bda-8b89-2d54dc5735a4-kube-api-access-v86lc\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.082500 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/44bed227-df87-4bda-8b89-2d54dc5735a4-public-tls-certs\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.082556 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44bed227-df87-4bda-8b89-2d54dc5735a4-combined-ca-bundle\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.082625 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/44bed227-df87-4bda-8b89-2d54dc5735a4-ovndb-tls-certs\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.082772 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/44bed227-df87-4bda-8b89-2d54dc5735a4-internal-tls-certs\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.089173 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/44bed227-df87-4bda-8b89-2d54dc5735a4-config\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.091960 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/44bed227-df87-4bda-8b89-2d54dc5735a4-ovndb-tls-certs\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.092096 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/44bed227-df87-4bda-8b89-2d54dc5735a4-internal-tls-certs\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.092152 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: 
\"kubernetes.io/secret/44bed227-df87-4bda-8b89-2d54dc5735a4-httpd-config\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.092982 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/44bed227-df87-4bda-8b89-2d54dc5735a4-public-tls-certs\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.093133 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44bed227-df87-4bda-8b89-2d54dc5735a4-combined-ca-bundle\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.104821 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v86lc\" (UniqueName: \"kubernetes.io/projected/44bed227-df87-4bda-8b89-2d54dc5735a4-kube-api-access-v86lc\") pod \"neutron-5f8c4b98b5-tmhs4\" (UID: \"44bed227-df87-4bda-8b89-2d54dc5735a4\") " pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.193271 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-7c9cd95b4b-sqmvx" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.275017 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-68456f646b-s7b7f" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.275012 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68456f646b-s7b7f" event={"ID":"a0d4a6bc-2860-495d-8fa4-76668ddcbcec","Type":"ContainerDied","Data":"30247a3fc3eac644cbef26d1b2e0b63ba0ba1d4711dddf8396295bb943c7a674"} Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.275698 4967 scope.go:117] "RemoveContainer" containerID="b7a1cad6be9818a65e19e387824c52a515e65e96296ea132b987ac65a6778cd9" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.280234 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" event={"ID":"12732ea1-9536-4f66-8a18-14aec233a88a","Type":"ContainerStarted","Data":"201b55c7f24a83c563f141af40c14947117f902d844980de09d42c970f65b8de"} Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.281590 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.289825 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5859ff54bd-hmqvr" event={"ID":"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff","Type":"ContainerStarted","Data":"f025580bf98c93b57599b9e747243fb87b1d585e13ad663372503a7e72922eaf"} Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.290188 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.315769 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" podStartSLOduration=3.315745105 podStartE2EDuration="3.315745105s" podCreationTimestamp="2025-11-21 15:56:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:56:58.300951027 +0000 UTC m=+1306.559472055" watchObservedRunningTime="2025-11-21 15:56:58.315745105 +0000 UTC m=+1306.574266113" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.327575 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5859ff54bd-hmqvr" podStartSLOduration=3.327547655 podStartE2EDuration="3.327547655s" podCreationTimestamp="2025-11-21 15:56:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:56:58.322670145 +0000 UTC m=+1306.581191153" watchObservedRunningTime="2025-11-21 15:56:58.327547655 +0000 UTC m=+1306.586068803" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.343347 4967 scope.go:117] "RemoveContainer" containerID="e4209bb0fb9ffacd5e770f3fe263b00db4c9df6775c3bc6150c23ab4302d1f08" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.354154 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.398656 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-68456f646b-s7b7f"] Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.420771 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.432894 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-68456f646b-s7b7f"] Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.557539 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83e7fc7b-7040-4fab-a499-fb9bf9c0fa92" path="/var/lib/kubelet/pods/83e7fc7b-7040-4fab-a499-fb9bf9c0fa92/volumes" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.559055 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0d4a6bc-2860-495d-8fa4-76668ddcbcec" path="/var/lib/kubelet/pods/a0d4a6bc-2860-495d-8fa4-76668ddcbcec/volumes" Nov 21 15:56:58 crc kubenswrapper[4967]: I1121 15:56:58.972565 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5f8c4b98b5-tmhs4"] Nov 21 15:56:59 crc kubenswrapper[4967]: I1121 15:56:59.303198 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2c76f304-d7ff-4488-bf08-228d143dae3d","Type":"ContainerStarted","Data":"11c38924738c4ef9ad7fb5a490065dd52ed0b64c23e618c587527e323ed415c6"} Nov 21 15:56:59 crc kubenswrapper[4967]: I1121 15:56:59.318208 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f8c4b98b5-tmhs4" event={"ID":"44bed227-df87-4bda-8b89-2d54dc5735a4","Type":"ContainerStarted","Data":"47a004a58763d48601585fa4effc6fd9c47991490fb12f821e706c8accb6ae44"} Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.085549 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.097018 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.106518 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.106545 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.115369 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-g2cpb" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.151591 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8ed35ebc-c2f7-4801-8f6e-51090767bb19-openstack-config\") pod \"openstackclient\" (UID: \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\") " pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.169751 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8ed35ebc-c2f7-4801-8f6e-51090767bb19-openstack-config-secret\") pod \"openstackclient\" (UID: \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\") " pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.170237 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ed35ebc-c2f7-4801-8f6e-51090767bb19-combined-ca-bundle\") pod \"openstackclient\" (UID: \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\") " pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.170396 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gvrpk\" (UniqueName: \"kubernetes.io/projected/8ed35ebc-c2f7-4801-8f6e-51090767bb19-kube-api-access-gvrpk\") pod \"openstackclient\" (UID: \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\") " pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.189794 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.274613 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ed35ebc-c2f7-4801-8f6e-51090767bb19-combined-ca-bundle\") pod \"openstackclient\" (UID: \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\") " pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.274689 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gvrpk\" (UniqueName: \"kubernetes.io/projected/8ed35ebc-c2f7-4801-8f6e-51090767bb19-kube-api-access-gvrpk\") pod \"openstackclient\" (UID: \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\") " pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.274790 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8ed35ebc-c2f7-4801-8f6e-51090767bb19-openstack-config\") pod \"openstackclient\" (UID: \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\") " pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.274827 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8ed35ebc-c2f7-4801-8f6e-51090767bb19-openstack-config-secret\") pod \"openstackclient\" (UID: \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\") " pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.277956 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8ed35ebc-c2f7-4801-8f6e-51090767bb19-openstack-config\") pod \"openstackclient\" (UID: \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\") " pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.283019 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ed35ebc-c2f7-4801-8f6e-51090767bb19-combined-ca-bundle\") pod \"openstackclient\" (UID: \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\") " pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.299776 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8ed35ebc-c2f7-4801-8f6e-51090767bb19-openstack-config-secret\") pod \"openstackclient\" (UID: \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\") " pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.310885 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gvrpk\" (UniqueName: \"kubernetes.io/projected/8ed35ebc-c2f7-4801-8f6e-51090767bb19-kube-api-access-gvrpk\") pod \"openstackclient\" (UID: \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\") " pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.335290 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f8c4b98b5-tmhs4" event={"ID":"44bed227-df87-4bda-8b89-2d54dc5735a4","Type":"ContainerStarted","Data":"609ba2958d88e45d47a3b1d51fc1a1c32bb99175ff9b5833ab8d20b5236da562"} Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.335694 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f8c4b98b5-tmhs4" event={"ID":"44bed227-df87-4bda-8b89-2d54dc5735a4","Type":"ContainerStarted","Data":"4da8b86356233055b7ce3604a0fdc8352a398a49c91737eb40c6c3ace443355a"} Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.336745 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.342654 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2c76f304-d7ff-4488-bf08-228d143dae3d","Type":"ContainerStarted","Data":"c7ed6f39402bed4507c312890489abc840e981c0bcabfd0d6536836bf4f1dfb1"} Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.342692 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2c76f304-d7ff-4488-bf08-228d143dae3d","Type":"ContainerStarted","Data":"b1ad982750be299bf4a3befd8e0849f052feb7b0212bfb23728fb46738926bec"} Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.370070 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5f8c4b98b5-tmhs4" podStartSLOduration=3.370050918 podStartE2EDuration="3.370050918s" podCreationTimestamp="2025-11-21 15:56:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:57:00.36596403 +0000 
UTC m=+1308.624485038" watchObservedRunningTime="2025-11-21 15:57:00.370050918 +0000 UTC m=+1308.628571926" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.393746 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.393732722 podStartE2EDuration="3.393732722s" podCreationTimestamp="2025-11-21 15:56:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:57:00.393070083 +0000 UTC m=+1308.651591091" watchObservedRunningTime="2025-11-21 15:57:00.393732722 +0000 UTC m=+1308.652253730" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.455922 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.456915 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.495684 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.563114 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.564571 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.564654 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: E1121 15:57:00.667260 4967 log.go:32] "RunPodSandbox from runtime service failed" err=< Nov 21 15:57:00 crc kubenswrapper[4967]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_8ed35ebc-c2f7-4801-8f6e-51090767bb19_0(9362edcf9e1d2086e239dc389c9eea7562320ca571c953148a6ca03bab0f8a76): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"9362edcf9e1d2086e239dc389c9eea7562320ca571c953148a6ca03bab0f8a76" Netns:"/var/run/netns/de4d33a5-36da-42cd-bbd2-2c96f87c1e70" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=9362edcf9e1d2086e239dc389c9eea7562320ca571c953148a6ca03bab0f8a76;K8S_POD_UID=8ed35ebc-c2f7-4801-8f6e-51090767bb19" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/8ed35ebc-c2f7-4801-8f6e-51090767bb19]: expected pod UID "8ed35ebc-c2f7-4801-8f6e-51090767bb19" but got "4ff8059c-8c40-4326-b477-95c43286eb35" from Kube API Nov 21 15:57:00 crc kubenswrapper[4967]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 21 15:57:00 crc kubenswrapper[4967]: > Nov 21 15:57:00 crc kubenswrapper[4967]: E1121 15:57:00.667334 4967 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Nov 21 15:57:00 crc kubenswrapper[4967]: rpc error: code = Unknown desc = failed to create pod network sandbox 
Nov 21 15:57:00 crc kubenswrapper[4967]: E1121 15:57:00.667260 4967 log.go:32] "RunPodSandbox from runtime service failed" err=<
Nov 21 15:57:00 crc kubenswrapper[4967]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_8ed35ebc-c2f7-4801-8f6e-51090767bb19_0(9362edcf9e1d2086e239dc389c9eea7562320ca571c953148a6ca03bab0f8a76): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"9362edcf9e1d2086e239dc389c9eea7562320ca571c953148a6ca03bab0f8a76" Netns:"/var/run/netns/de4d33a5-36da-42cd-bbd2-2c96f87c1e70" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=9362edcf9e1d2086e239dc389c9eea7562320ca571c953148a6ca03bab0f8a76;K8S_POD_UID=8ed35ebc-c2f7-4801-8f6e-51090767bb19" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/8ed35ebc-c2f7-4801-8f6e-51090767bb19]: expected pod UID "8ed35ebc-c2f7-4801-8f6e-51090767bb19" but got "4ff8059c-8c40-4326-b477-95c43286eb35" from Kube API
Nov 21 15:57:00 crc kubenswrapper[4967]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"}
Nov 21 15:57:00 crc kubenswrapper[4967]: >
Nov 21 15:57:00 crc kubenswrapper[4967]: E1121 15:57:00.667334 4967 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=<
Nov 21 15:57:00 crc kubenswrapper[4967]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_8ed35ebc-c2f7-4801-8f6e-51090767bb19_0(9362edcf9e1d2086e239dc389c9eea7562320ca571c953148a6ca03bab0f8a76): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"9362edcf9e1d2086e239dc389c9eea7562320ca571c953148a6ca03bab0f8a76" Netns:"/var/run/netns/de4d33a5-36da-42cd-bbd2-2c96f87c1e70" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=9362edcf9e1d2086e239dc389c9eea7562320ca571c953148a6ca03bab0f8a76;K8S_POD_UID=8ed35ebc-c2f7-4801-8f6e-51090767bb19" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/8ed35ebc-c2f7-4801-8f6e-51090767bb19]: expected pod UID "8ed35ebc-c2f7-4801-8f6e-51090767bb19" but got "4ff8059c-8c40-4326-b477-95c43286eb35" from Kube API
Nov 21 15:57:00 crc kubenswrapper[4967]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"}
Nov 21 15:57:00 crc kubenswrapper[4967]: > pod="openstack/openstackclient"
Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.682887 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ff8059c-8c40-4326-b477-95c43286eb35-combined-ca-bundle\") pod \"openstackclient\" (UID: \"4ff8059c-8c40-4326-b477-95c43286eb35\") " pod="openstack/openstackclient"
Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.682980 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/4ff8059c-8c40-4326-b477-95c43286eb35-openstack-config\") pod \"openstackclient\" (UID: \"4ff8059c-8c40-4326-b477-95c43286eb35\") " pod="openstack/openstackclient"
Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.683177 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/4ff8059c-8c40-4326-b477-95c43286eb35-openstack-config-secret\") pod \"openstackclient\" (UID: \"4ff8059c-8c40-4326-b477-95c43286eb35\") " pod="openstack/openstackclient"
Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.683238 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vq448\" (UniqueName: \"kubernetes.io/projected/4ff8059c-8c40-4326-b477-95c43286eb35-kube-api-access-vq448\") pod \"openstackclient\" (UID: \"4ff8059c-8c40-4326-b477-95c43286eb35\") " pod="openstack/openstackclient"
Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.784976 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/4ff8059c-8c40-4326-b477-95c43286eb35-openstack-config-secret\") pod \"openstackclient\" (UID: \"4ff8059c-8c40-4326-b477-95c43286eb35\") " pod="openstack/openstackclient"
Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.785365 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vq448\" (UniqueName: \"kubernetes.io/projected/4ff8059c-8c40-4326-b477-95c43286eb35-kube-api-access-vq448\") pod \"openstackclient\" (UID: \"4ff8059c-8c40-4326-b477-95c43286eb35\") " pod="openstack/openstackclient"
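
The StdinData blob at the end of the RunPodSandbox error above is the Multus daemon configuration, flattened to a single line of JSON. A small Go sketch to pretty-print it for readability; the string literal is copied verbatim from the log, everything else is illustrative.

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"log"
    )

    // stdinData is the Multus CNI configuration embedded in the
    // RunPodSandbox error above, copied verbatim from the log.
    const stdinData = `{"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"}`

    func main() {
    	// Decode the one-line blob, then re-encode it with indentation.
    	var cfg map[string]any
    	if err := json.Unmarshal([]byte(stdinData), &cfg); err != nil {
    		log.Fatal(err)
    	}
    	out, err := json.MarshalIndent(cfg, "", "  ")
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(string(out))
    }
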
for volume \"kube-api-access-vq448\" (UniqueName: \"kubernetes.io/projected/4ff8059c-8c40-4326-b477-95c43286eb35-kube-api-access-vq448\") pod \"openstackclient\" (UID: \"4ff8059c-8c40-4326-b477-95c43286eb35\") " pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.785411 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ff8059c-8c40-4326-b477-95c43286eb35-combined-ca-bundle\") pod \"openstackclient\" (UID: \"4ff8059c-8c40-4326-b477-95c43286eb35\") " pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.785458 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/4ff8059c-8c40-4326-b477-95c43286eb35-openstack-config\") pod \"openstackclient\" (UID: \"4ff8059c-8c40-4326-b477-95c43286eb35\") " pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.786253 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/4ff8059c-8c40-4326-b477-95c43286eb35-openstack-config\") pod \"openstackclient\" (UID: \"4ff8059c-8c40-4326-b477-95c43286eb35\") " pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.789775 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/4ff8059c-8c40-4326-b477-95c43286eb35-openstack-config-secret\") pod \"openstackclient\" (UID: \"4ff8059c-8c40-4326-b477-95c43286eb35\") " pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.791875 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ff8059c-8c40-4326-b477-95c43286eb35-combined-ca-bundle\") pod \"openstackclient\" (UID: \"4ff8059c-8c40-4326-b477-95c43286eb35\") " pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.801027 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vq448\" (UniqueName: \"kubernetes.io/projected/4ff8059c-8c40-4326-b477-95c43286eb35-kube-api-access-vq448\") pod \"openstackclient\" (UID: \"4ff8059c-8c40-4326-b477-95c43286eb35\") " pod="openstack/openstackclient" Nov 21 15:57:00 crc kubenswrapper[4967]: I1121 15:57:00.922086 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 21 15:57:01 crc kubenswrapper[4967]: I1121 15:57:01.355394 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 21 15:57:01 crc kubenswrapper[4967]: I1121 15:57:01.369195 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 21 15:57:01 crc kubenswrapper[4967]: I1121 15:57:01.372391 4967 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="8ed35ebc-c2f7-4801-8f6e-51090767bb19" podUID="4ff8059c-8c40-4326-b477-95c43286eb35" Nov 21 15:57:01 crc kubenswrapper[4967]: I1121 15:57:01.399410 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8ed35ebc-c2f7-4801-8f6e-51090767bb19-openstack-config-secret\") pod \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\" (UID: \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\") " Nov 21 15:57:01 crc kubenswrapper[4967]: I1121 15:57:01.399526 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8ed35ebc-c2f7-4801-8f6e-51090767bb19-openstack-config\") pod \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\" (UID: \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\") " Nov 21 15:57:01 crc kubenswrapper[4967]: I1121 15:57:01.399662 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ed35ebc-c2f7-4801-8f6e-51090767bb19-combined-ca-bundle\") pod \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\" (UID: \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\") " Nov 21 15:57:01 crc kubenswrapper[4967]: I1121 15:57:01.399758 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gvrpk\" (UniqueName: \"kubernetes.io/projected/8ed35ebc-c2f7-4801-8f6e-51090767bb19-kube-api-access-gvrpk\") pod \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\" (UID: \"8ed35ebc-c2f7-4801-8f6e-51090767bb19\") " Nov 21 15:57:01 crc kubenswrapper[4967]: I1121 15:57:01.406014 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ed35ebc-c2f7-4801-8f6e-51090767bb19-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "8ed35ebc-c2f7-4801-8f6e-51090767bb19" (UID: "8ed35ebc-c2f7-4801-8f6e-51090767bb19"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:57:01 crc kubenswrapper[4967]: I1121 15:57:01.411881 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ed35ebc-c2f7-4801-8f6e-51090767bb19-kube-api-access-gvrpk" (OuterVolumeSpecName: "kube-api-access-gvrpk") pod "8ed35ebc-c2f7-4801-8f6e-51090767bb19" (UID: "8ed35ebc-c2f7-4801-8f6e-51090767bb19"). InnerVolumeSpecName "kube-api-access-gvrpk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:57:01 crc kubenswrapper[4967]: I1121 15:57:01.412675 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ed35ebc-c2f7-4801-8f6e-51090767bb19-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "8ed35ebc-c2f7-4801-8f6e-51090767bb19" (UID: "8ed35ebc-c2f7-4801-8f6e-51090767bb19"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:01 crc kubenswrapper[4967]: I1121 15:57:01.417456 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ed35ebc-c2f7-4801-8f6e-51090767bb19-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8ed35ebc-c2f7-4801-8f6e-51090767bb19" (UID: "8ed35ebc-c2f7-4801-8f6e-51090767bb19"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:01 crc kubenswrapper[4967]: I1121 15:57:01.496519 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 21 15:57:01 crc kubenswrapper[4967]: W1121 15:57:01.500077 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ff8059c_8c40_4326_b477_95c43286eb35.slice/crio-7c9206c0d25ff40dbbf256008fd7d49388faf9487743f75f58c0e904b57a3503 WatchSource:0}: Error finding container 7c9206c0d25ff40dbbf256008fd7d49388faf9487743f75f58c0e904b57a3503: Status 404 returned error can't find the container with id 7c9206c0d25ff40dbbf256008fd7d49388faf9487743f75f58c0e904b57a3503 Nov 21 15:57:01 crc kubenswrapper[4967]: I1121 15:57:01.501985 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ed35ebc-c2f7-4801-8f6e-51090767bb19-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:01 crc kubenswrapper[4967]: I1121 15:57:01.502013 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gvrpk\" (UniqueName: \"kubernetes.io/projected/8ed35ebc-c2f7-4801-8f6e-51090767bb19-kube-api-access-gvrpk\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:01 crc kubenswrapper[4967]: I1121 15:57:01.502025 4967 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/8ed35ebc-c2f7-4801-8f6e-51090767bb19-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:01 crc kubenswrapper[4967]: I1121 15:57:01.502033 4967 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/8ed35ebc-c2f7-4801-8f6e-51090767bb19-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:01 crc kubenswrapper[4967]: I1121 15:57:01.537476 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 21 15:57:02 crc kubenswrapper[4967]: I1121 15:57:02.369153 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"4ff8059c-8c40-4326-b477-95c43286eb35","Type":"ContainerStarted","Data":"7c9206c0d25ff40dbbf256008fd7d49388faf9487743f75f58c0e904b57a3503"} Nov 21 15:57:02 crc kubenswrapper[4967]: I1121 15:57:02.369173 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 21 15:57:02 crc kubenswrapper[4967]: I1121 15:57:02.385388 4967 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="8ed35ebc-c2f7-4801-8f6e-51090767bb19" podUID="4ff8059c-8c40-4326-b477-95c43286eb35" Nov 21 15:57:02 crc kubenswrapper[4967]: I1121 15:57:02.562921 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ed35ebc-c2f7-4801-8f6e-51090767bb19" path="/var/lib/kubelet/pods/8ed35ebc-c2f7-4801-8f6e-51090767bb19/volumes" Nov 21 15:57:02 crc kubenswrapper[4967]: I1121 15:57:02.564769 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:57:02 crc kubenswrapper[4967]: I1121 15:57:02.564816 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6d9cf75cd4-wgblt" Nov 21 15:57:02 crc kubenswrapper[4967]: I1121 15:57:02.786860 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.018045 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-794fb7d789-mkxk2"] Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.020399 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.023789 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.024037 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.024230 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.049761 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-794fb7d789-mkxk2"] Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.138428 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-546c2\" (UniqueName: \"kubernetes.io/projected/9488c46d-11de-4819-9784-e32e3893a5d9-kube-api-access-546c2\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.138505 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9488c46d-11de-4819-9784-e32e3893a5d9-public-tls-certs\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.138650 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9488c46d-11de-4819-9784-e32e3893a5d9-config-data\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.138693 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/9488c46d-11de-4819-9784-e32e3893a5d9-run-httpd\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.138734 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9488c46d-11de-4819-9784-e32e3893a5d9-combined-ca-bundle\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.139122 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9488c46d-11de-4819-9784-e32e3893a5d9-log-httpd\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.139534 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9488c46d-11de-4819-9784-e32e3893a5d9-internal-tls-certs\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.139704 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9488c46d-11de-4819-9784-e32e3893a5d9-etc-swift\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.245066 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-546c2\" (UniqueName: \"kubernetes.io/projected/9488c46d-11de-4819-9784-e32e3893a5d9-kube-api-access-546c2\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.245166 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9488c46d-11de-4819-9784-e32e3893a5d9-public-tls-certs\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.245221 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9488c46d-11de-4819-9784-e32e3893a5d9-config-data\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.245276 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9488c46d-11de-4819-9784-e32e3893a5d9-run-httpd\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.245802 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/9488c46d-11de-4819-9784-e32e3893a5d9-run-httpd\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.246394 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9488c46d-11de-4819-9784-e32e3893a5d9-combined-ca-bundle\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.246598 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9488c46d-11de-4819-9784-e32e3893a5d9-log-httpd\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.246878 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9488c46d-11de-4819-9784-e32e3893a5d9-internal-tls-certs\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.247084 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9488c46d-11de-4819-9784-e32e3893a5d9-log-httpd\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.247093 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9488c46d-11de-4819-9784-e32e3893a5d9-etc-swift\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.253323 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9488c46d-11de-4819-9784-e32e3893a5d9-public-tls-certs\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.254283 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9488c46d-11de-4819-9784-e32e3893a5d9-config-data\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.254493 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9488c46d-11de-4819-9784-e32e3893a5d9-combined-ca-bundle\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.255273 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9488c46d-11de-4819-9784-e32e3893a5d9-internal-tls-certs\") pod 
\"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.267439 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9488c46d-11de-4819-9784-e32e3893a5d9-etc-swift\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.270507 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-546c2\" (UniqueName: \"kubernetes.io/projected/9488c46d-11de-4819-9784-e32e3893a5d9-kube-api-access-546c2\") pod \"swift-proxy-794fb7d789-mkxk2\" (UID: \"9488c46d-11de-4819-9784-e32e3893a5d9\") " pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.341938 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.653092 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-59bbb4975-mxdmz"] Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.655192 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-59bbb4975-mxdmz" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.679624 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.679889 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-z24sf" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.680016 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.683054 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.687427 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-59bbb4975-mxdmz"] Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.770357 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f87aa54b-1478-41af-a049-56a703a25f04-config-data-custom\") pod \"heat-engine-59bbb4975-mxdmz\" (UID: \"f87aa54b-1478-41af-a049-56a703a25f04\") " pod="openstack/heat-engine-59bbb4975-mxdmz" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.770403 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9fqx\" (UniqueName: \"kubernetes.io/projected/f87aa54b-1478-41af-a049-56a703a25f04-kube-api-access-w9fqx\") pod \"heat-engine-59bbb4975-mxdmz\" (UID: \"f87aa54b-1478-41af-a049-56a703a25f04\") " pod="openstack/heat-engine-59bbb4975-mxdmz" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.770532 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f87aa54b-1478-41af-a049-56a703a25f04-combined-ca-bundle\") pod \"heat-engine-59bbb4975-mxdmz\" (UID: \"f87aa54b-1478-41af-a049-56a703a25f04\") " pod="openstack/heat-engine-59bbb4975-mxdmz" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 
15:57:05.770619 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f87aa54b-1478-41af-a049-56a703a25f04-config-data\") pod \"heat-engine-59bbb4975-mxdmz\" (UID: \"f87aa54b-1478-41af-a049-56a703a25f04\") " pod="openstack/heat-engine-59bbb4975-mxdmz" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.811697 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-54db5b96bc-8xjk7"] Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.813901 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.840357 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.875437 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9fqx\" (UniqueName: \"kubernetes.io/projected/f87aa54b-1478-41af-a049-56a703a25f04-kube-api-access-w9fqx\") pod \"heat-engine-59bbb4975-mxdmz\" (UID: \"f87aa54b-1478-41af-a049-56a703a25f04\") " pod="openstack/heat-engine-59bbb4975-mxdmz" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.875836 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f87aa54b-1478-41af-a049-56a703a25f04-combined-ca-bundle\") pod \"heat-engine-59bbb4975-mxdmz\" (UID: \"f87aa54b-1478-41af-a049-56a703a25f04\") " pod="openstack/heat-engine-59bbb4975-mxdmz" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.875867 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swqwl\" (UniqueName: \"kubernetes.io/projected/03b645f4-be5e-44ce-b67d-b6e9b1661282-kube-api-access-swqwl\") pod \"heat-cfnapi-54db5b96bc-8xjk7\" (UID: \"03b645f4-be5e-44ce-b67d-b6e9b1661282\") " pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.876021 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f87aa54b-1478-41af-a049-56a703a25f04-config-data\") pod \"heat-engine-59bbb4975-mxdmz\" (UID: \"f87aa54b-1478-41af-a049-56a703a25f04\") " pod="openstack/heat-engine-59bbb4975-mxdmz" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.876051 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03b645f4-be5e-44ce-b67d-b6e9b1661282-config-data\") pod \"heat-cfnapi-54db5b96bc-8xjk7\" (UID: \"03b645f4-be5e-44ce-b67d-b6e9b1661282\") " pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.876120 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03b645f4-be5e-44ce-b67d-b6e9b1661282-combined-ca-bundle\") pod \"heat-cfnapi-54db5b96bc-8xjk7\" (UID: \"03b645f4-be5e-44ce-b67d-b6e9b1661282\") " pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.876159 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03b645f4-be5e-44ce-b67d-b6e9b1661282-config-data-custom\") pod 
\"heat-cfnapi-54db5b96bc-8xjk7\" (UID: \"03b645f4-be5e-44ce-b67d-b6e9b1661282\") " pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.876264 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f87aa54b-1478-41af-a049-56a703a25f04-config-data-custom\") pod \"heat-engine-59bbb4975-mxdmz\" (UID: \"f87aa54b-1478-41af-a049-56a703a25f04\") " pod="openstack/heat-engine-59bbb4975-mxdmz" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.890841 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f87aa54b-1478-41af-a049-56a703a25f04-config-data-custom\") pod \"heat-engine-59bbb4975-mxdmz\" (UID: \"f87aa54b-1478-41af-a049-56a703a25f04\") " pod="openstack/heat-engine-59bbb4975-mxdmz" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.908200 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5"] Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.926133 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f87aa54b-1478-41af-a049-56a703a25f04-config-data\") pod \"heat-engine-59bbb4975-mxdmz\" (UID: \"f87aa54b-1478-41af-a049-56a703a25f04\") " pod="openstack/heat-engine-59bbb4975-mxdmz" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.934526 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f87aa54b-1478-41af-a049-56a703a25f04-combined-ca-bundle\") pod \"heat-engine-59bbb4975-mxdmz\" (UID: \"f87aa54b-1478-41af-a049-56a703a25f04\") " pod="openstack/heat-engine-59bbb4975-mxdmz" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.940036 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" podUID="5527dd0f-b1df-4a90-846b-43dd6319bfa5" containerName="dnsmasq-dns" containerID="cri-o://87a6628c2abd3227316091c58c41a52a4c03279bb98e7ff5369006ab99066d03" gracePeriod=10 Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.945662 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9fqx\" (UniqueName: \"kubernetes.io/projected/f87aa54b-1478-41af-a049-56a703a25f04-kube-api-access-w9fqx\") pod \"heat-engine-59bbb4975-mxdmz\" (UID: \"f87aa54b-1478-41af-a049-56a703a25f04\") " pod="openstack/heat-engine-59bbb4975-mxdmz" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.996284 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03b645f4-be5e-44ce-b67d-b6e9b1661282-config-data\") pod \"heat-cfnapi-54db5b96bc-8xjk7\" (UID: \"03b645f4-be5e-44ce-b67d-b6e9b1661282\") " pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.996395 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03b645f4-be5e-44ce-b67d-b6e9b1661282-combined-ca-bundle\") pod \"heat-cfnapi-54db5b96bc-8xjk7\" (UID: \"03b645f4-be5e-44ce-b67d-b6e9b1661282\") " pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.996432 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/03b645f4-be5e-44ce-b67d-b6e9b1661282-config-data-custom\") pod \"heat-cfnapi-54db5b96bc-8xjk7\" (UID: \"03b645f4-be5e-44ce-b67d-b6e9b1661282\") " pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.996665 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swqwl\" (UniqueName: \"kubernetes.io/projected/03b645f4-be5e-44ce-b67d-b6e9b1661282-kube-api-access-swqwl\") pod \"heat-cfnapi-54db5b96bc-8xjk7\" (UID: \"03b645f4-be5e-44ce-b67d-b6e9b1661282\") " pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" Nov 21 15:57:05 crc kubenswrapper[4967]: I1121 15:57:05.998751 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-54db5b96bc-8xjk7"] Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.006675 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-59bbb4975-mxdmz" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.015237 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03b645f4-be5e-44ce-b67d-b6e9b1661282-combined-ca-bundle\") pod \"heat-cfnapi-54db5b96bc-8xjk7\" (UID: \"03b645f4-be5e-44ce-b67d-b6e9b1661282\") " pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.017305 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03b645f4-be5e-44ce-b67d-b6e9b1661282-config-data-custom\") pod \"heat-cfnapi-54db5b96bc-8xjk7\" (UID: \"03b645f4-be5e-44ce-b67d-b6e9b1661282\") " pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.031410 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swqwl\" (UniqueName: \"kubernetes.io/projected/03b645f4-be5e-44ce-b67d-b6e9b1661282-kube-api-access-swqwl\") pod \"heat-cfnapi-54db5b96bc-8xjk7\" (UID: \"03b645f4-be5e-44ce-b67d-b6e9b1661282\") " pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.034372 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-65548fddc5-76rgx"] Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.035181 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03b645f4-be5e-44ce-b67d-b6e9b1661282-config-data\") pod \"heat-cfnapi-54db5b96bc-8xjk7\" (UID: \"03b645f4-be5e-44ce-b67d-b6e9b1661282\") " pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.036041 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-65548fddc5-76rgx" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.039911 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.061348 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-w6tqb"] Nov 21 15:57:06 crc kubenswrapper[4967]: W1121 15:57:06.062414 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9488c46d_11de_4819_9784_e32e3893a5d9.slice/crio-420eb8305aabac5e6dcefc3a99781f3935af932d0ae39019a5d5eed5a1e5b00b WatchSource:0}: Error finding container 420eb8305aabac5e6dcefc3a99781f3935af932d0ae39019a5d5eed5a1e5b00b: Status 404 returned error can't find the container with id 420eb8305aabac5e6dcefc3a99781f3935af932d0ae39019a5d5eed5a1e5b00b Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.065803 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.082013 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-65548fddc5-76rgx"] Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.098512 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-ovsdbserver-sb\") pod \"dnsmasq-dns-688b9f5b49-w6tqb\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") " pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.098655 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-dns-svc\") pod \"dnsmasq-dns-688b9f5b49-w6tqb\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") " pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.098776 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-config\") pod \"dnsmasq-dns-688b9f5b49-w6tqb\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") " pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.098877 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-dns-swift-storage-0\") pod \"dnsmasq-dns-688b9f5b49-w6tqb\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") " pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.098944 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-ovsdbserver-nb\") pod \"dnsmasq-dns-688b9f5b49-w6tqb\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") " pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.099004 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/89017806-bb3a-4c00-b40c-5f600c61ecff-config-data-custom\") pod \"heat-api-65548fddc5-76rgx\" (UID: \"89017806-bb3a-4c00-b40c-5f600c61ecff\") " pod="openstack/heat-api-65548fddc5-76rgx" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.099083 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pt92c\" (UniqueName: \"kubernetes.io/projected/b316537c-ac90-4a7b-8cee-ed9cb7199f98-kube-api-access-pt92c\") pod \"dnsmasq-dns-688b9f5b49-w6tqb\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") " pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.099107 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9kmv\" (UniqueName: \"kubernetes.io/projected/89017806-bb3a-4c00-b40c-5f600c61ecff-kube-api-access-l9kmv\") pod \"heat-api-65548fddc5-76rgx\" (UID: \"89017806-bb3a-4c00-b40c-5f600c61ecff\") " pod="openstack/heat-api-65548fddc5-76rgx" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.099184 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89017806-bb3a-4c00-b40c-5f600c61ecff-config-data\") pod \"heat-api-65548fddc5-76rgx\" (UID: \"89017806-bb3a-4c00-b40c-5f600c61ecff\") " pod="openstack/heat-api-65548fddc5-76rgx" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.100143 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89017806-bb3a-4c00-b40c-5f600c61ecff-combined-ca-bundle\") pod \"heat-api-65548fddc5-76rgx\" (UID: \"89017806-bb3a-4c00-b40c-5f600c61ecff\") " pod="openstack/heat-api-65548fddc5-76rgx" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.100561 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-w6tqb"] Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.157013 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-794fb7d789-mkxk2"] Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.188276 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.208277 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89017806-bb3a-4c00-b40c-5f600c61ecff-config-data\") pod \"heat-api-65548fddc5-76rgx\" (UID: \"89017806-bb3a-4c00-b40c-5f600c61ecff\") " pod="openstack/heat-api-65548fddc5-76rgx" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.208380 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89017806-bb3a-4c00-b40c-5f600c61ecff-combined-ca-bundle\") pod \"heat-api-65548fddc5-76rgx\" (UID: \"89017806-bb3a-4c00-b40c-5f600c61ecff\") " pod="openstack/heat-api-65548fddc5-76rgx" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.208967 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-ovsdbserver-sb\") pod \"dnsmasq-dns-688b9f5b49-w6tqb\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") " pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.209191 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-dns-svc\") pod \"dnsmasq-dns-688b9f5b49-w6tqb\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") " pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.209299 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-config\") pod \"dnsmasq-dns-688b9f5b49-w6tqb\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") " pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.209372 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-dns-swift-storage-0\") pod \"dnsmasq-dns-688b9f5b49-w6tqb\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") " pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.209419 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-ovsdbserver-nb\") pod \"dnsmasq-dns-688b9f5b49-w6tqb\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") " pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.209468 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/89017806-bb3a-4c00-b40c-5f600c61ecff-config-data-custom\") pod \"heat-api-65548fddc5-76rgx\" (UID: \"89017806-bb3a-4c00-b40c-5f600c61ecff\") " pod="openstack/heat-api-65548fddc5-76rgx" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.209750 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pt92c\" (UniqueName: \"kubernetes.io/projected/b316537c-ac90-4a7b-8cee-ed9cb7199f98-kube-api-access-pt92c\") pod \"dnsmasq-dns-688b9f5b49-w6tqb\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") " pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 
15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.209785 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9kmv\" (UniqueName: \"kubernetes.io/projected/89017806-bb3a-4c00-b40c-5f600c61ecff-kube-api-access-l9kmv\") pod \"heat-api-65548fddc5-76rgx\" (UID: \"89017806-bb3a-4c00-b40c-5f600c61ecff\") " pod="openstack/heat-api-65548fddc5-76rgx" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.214971 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89017806-bb3a-4c00-b40c-5f600c61ecff-config-data\") pod \"heat-api-65548fddc5-76rgx\" (UID: \"89017806-bb3a-4c00-b40c-5f600c61ecff\") " pod="openstack/heat-api-65548fddc5-76rgx" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.217037 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-config\") pod \"dnsmasq-dns-688b9f5b49-w6tqb\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") " pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.217037 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-dns-svc\") pod \"dnsmasq-dns-688b9f5b49-w6tqb\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") " pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.217877 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89017806-bb3a-4c00-b40c-5f600c61ecff-combined-ca-bundle\") pod \"heat-api-65548fddc5-76rgx\" (UID: \"89017806-bb3a-4c00-b40c-5f600c61ecff\") " pod="openstack/heat-api-65548fddc5-76rgx" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.218099 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-ovsdbserver-nb\") pod \"dnsmasq-dns-688b9f5b49-w6tqb\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") " pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.218625 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-ovsdbserver-sb\") pod \"dnsmasq-dns-688b9f5b49-w6tqb\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") " pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.223376 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/89017806-bb3a-4c00-b40c-5f600c61ecff-config-data-custom\") pod \"heat-api-65548fddc5-76rgx\" (UID: \"89017806-bb3a-4c00-b40c-5f600c61ecff\") " pod="openstack/heat-api-65548fddc5-76rgx" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.228469 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9kmv\" (UniqueName: \"kubernetes.io/projected/89017806-bb3a-4c00-b40c-5f600c61ecff-kube-api-access-l9kmv\") pod \"heat-api-65548fddc5-76rgx\" (UID: \"89017806-bb3a-4c00-b40c-5f600c61ecff\") " pod="openstack/heat-api-65548fddc5-76rgx" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.229636 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-dns-swift-storage-0\") pod \"dnsmasq-dns-688b9f5b49-w6tqb\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") " pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.239472 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pt92c\" (UniqueName: \"kubernetes.io/projected/b316537c-ac90-4a7b-8cee-ed9cb7199f98-kube-api-access-pt92c\") pod \"dnsmasq-dns-688b9f5b49-w6tqb\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") " pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.386871 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-65548fddc5-76rgx" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.401972 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.480472 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-794fb7d789-mkxk2" event={"ID":"9488c46d-11de-4819-9784-e32e3893a5d9","Type":"ContainerStarted","Data":"420eb8305aabac5e6dcefc3a99781f3935af932d0ae39019a5d5eed5a1e5b00b"} Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.513503 4967 generic.go:334] "Generic (PLEG): container finished" podID="5527dd0f-b1df-4a90-846b-43dd6319bfa5" containerID="87a6628c2abd3227316091c58c41a52a4c03279bb98e7ff5369006ab99066d03" exitCode=0 Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.513584 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" event={"ID":"5527dd0f-b1df-4a90-846b-43dd6319bfa5","Type":"ContainerDied","Data":"87a6628c2abd3227316091c58c41a52a4c03279bb98e7ff5369006ab99066d03"} Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.764138 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-59bbb4975-mxdmz"] Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.814662 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.945654 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-ovsdbserver-nb\") pod \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.945721 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-ovsdbserver-sb\") pod \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.945780 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-config\") pod \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.945913 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sswjz\" (UniqueName: \"kubernetes.io/projected/5527dd0f-b1df-4a90-846b-43dd6319bfa5-kube-api-access-sswjz\") pod \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.945983 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-dns-svc\") pod \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.946192 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-dns-swift-storage-0\") pod \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\" (UID: \"5527dd0f-b1df-4a90-846b-43dd6319bfa5\") " Nov 21 15:57:06 crc kubenswrapper[4967]: I1121 15:57:06.973544 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5527dd0f-b1df-4a90-846b-43dd6319bfa5-kube-api-access-sswjz" (OuterVolumeSpecName: "kube-api-access-sswjz") pod "5527dd0f-b1df-4a90-846b-43dd6319bfa5" (UID: "5527dd0f-b1df-4a90-846b-43dd6319bfa5"). InnerVolumeSpecName "kube-api-access-sswjz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.049150 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sswjz\" (UniqueName: \"kubernetes.io/projected/5527dd0f-b1df-4a90-846b-43dd6319bfa5-kube-api-access-sswjz\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.246677 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5527dd0f-b1df-4a90-846b-43dd6319bfa5" (UID: "5527dd0f-b1df-4a90-846b-43dd6319bfa5"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.254939 4967 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.273175 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-config" (OuterVolumeSpecName: "config") pod "5527dd0f-b1df-4a90-846b-43dd6319bfa5" (UID: "5527dd0f-b1df-4a90-846b-43dd6319bfa5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.288972 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "5527dd0f-b1df-4a90-846b-43dd6319bfa5" (UID: "5527dd0f-b1df-4a90-846b-43dd6319bfa5"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.289412 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5527dd0f-b1df-4a90-846b-43dd6319bfa5" (UID: "5527dd0f-b1df-4a90-846b-43dd6319bfa5"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.302431 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5527dd0f-b1df-4a90-846b-43dd6319bfa5" (UID: "5527dd0f-b1df-4a90-846b-43dd6319bfa5"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.307456 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-54db5b96bc-8xjk7"] Nov 21 15:57:07 crc kubenswrapper[4967]: W1121 15:57:07.349807 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod03b645f4_be5e_44ce_b67d_b6e9b1661282.slice/crio-ab577b485bed293a8dce7f728edeb473109fc742845302cac1ce47f22ac10804 WatchSource:0}: Error finding container ab577b485bed293a8dce7f728edeb473109fc742845302cac1ce47f22ac10804: Status 404 returned error can't find the container with id ab577b485bed293a8dce7f728edeb473109fc742845302cac1ce47f22ac10804 Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.356814 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.356845 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.356857 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.356869 4967 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5527dd0f-b1df-4a90-846b-43dd6319bfa5-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.544094 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-794fb7d789-mkxk2" event={"ID":"9488c46d-11de-4819-9784-e32e3893a5d9","Type":"ContainerStarted","Data":"16d2b17442e258c0492e680170e9f94c43019cf5b4baf9df12336ab97de27f5e"} Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.544132 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-794fb7d789-mkxk2" event={"ID":"9488c46d-11de-4819-9784-e32e3893a5d9","Type":"ContainerStarted","Data":"54bd6f4562871460860b7760eb9ba45c04b907e7bbc08eb9e6e1fbbc9f12ac6b"} Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.544559 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.544615 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.548630 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" event={"ID":"03b645f4-be5e-44ce-b67d-b6e9b1661282","Type":"ContainerStarted","Data":"ab577b485bed293a8dce7f728edeb473109fc742845302cac1ce47f22ac10804"} Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.551465 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-59bbb4975-mxdmz" event={"ID":"f87aa54b-1478-41af-a049-56a703a25f04","Type":"ContainerStarted","Data":"ab907e5c8acb67e6d6c9fe0ebf8aa8cd3eeb87f939393c1b0e26eab8ecd5e1cd"} Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.562745 4967 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" event={"ID":"5527dd0f-b1df-4a90-846b-43dd6319bfa5","Type":"ContainerDied","Data":"bf3f46237d00dd8265f163bfb23f3247b48950cc5e227a4d1fbb1580d31aef50"} Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.562900 4967 scope.go:117] "RemoveContainer" containerID="87a6628c2abd3227316091c58c41a52a4c03279bb98e7ff5369006ab99066d03" Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.563051 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5" Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.587907 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-794fb7d789-mkxk2" podStartSLOduration=3.587884768 podStartE2EDuration="3.587884768s" podCreationTimestamp="2025-11-21 15:57:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:57:07.572512114 +0000 UTC m=+1315.831033122" watchObservedRunningTime="2025-11-21 15:57:07.587884768 +0000 UTC m=+1315.846405776" Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.652373 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5"] Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.656183 4967 scope.go:117] "RemoveContainer" containerID="6d1010d2ce2e9b54203f3c10fc78cd1e95689563030b05991037db1dcd3cf726" Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.680013 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5cc8b5d5c5-vbvm5"] Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.693140 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-w6tqb"] Nov 21 15:57:07 crc kubenswrapper[4967]: I1121 15:57:07.774765 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-65548fddc5-76rgx"] Nov 21 15:57:08 crc kubenswrapper[4967]: I1121 15:57:08.108007 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 21 15:57:08 crc kubenswrapper[4967]: I1121 15:57:08.579135 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5527dd0f-b1df-4a90-846b-43dd6319bfa5" path="/var/lib/kubelet/pods/5527dd0f-b1df-4a90-846b-43dd6319bfa5/volumes" Nov 21 15:57:08 crc kubenswrapper[4967]: I1121 15:57:08.603545 4967 generic.go:334] "Generic (PLEG): container finished" podID="b316537c-ac90-4a7b-8cee-ed9cb7199f98" containerID="7d967aa61e05a24ed613d07f32d8bb266e091a1c52c9e7b8d8ea1c63c26399de" exitCode=0 Nov 21 15:57:08 crc kubenswrapper[4967]: I1121 15:57:08.603617 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" event={"ID":"b316537c-ac90-4a7b-8cee-ed9cb7199f98","Type":"ContainerDied","Data":"7d967aa61e05a24ed613d07f32d8bb266e091a1c52c9e7b8d8ea1c63c26399de"} Nov 21 15:57:08 crc kubenswrapper[4967]: I1121 15:57:08.603649 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" event={"ID":"b316537c-ac90-4a7b-8cee-ed9cb7199f98","Type":"ContainerStarted","Data":"fa541bcbdd262094577df0665e707ffae0e47a0c9c5cf804886cc4bb1569a18f"} Nov 21 15:57:08 crc kubenswrapper[4967]: I1121 15:57:08.608118 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-65548fddc5-76rgx" 
event={"ID":"89017806-bb3a-4c00-b40c-5f600c61ecff","Type":"ContainerStarted","Data":"6c4d8594bc08a78f291fb9f45443b02ea3bf0e89d7c53cd017125baf9516b8c1"} Nov 21 15:57:08 crc kubenswrapper[4967]: I1121 15:57:08.614454 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-59bbb4975-mxdmz" event={"ID":"f87aa54b-1478-41af-a049-56a703a25f04","Type":"ContainerStarted","Data":"b3be32a588a4c9ebac91ad9de3f0fe829c551fddd0a700a75870cd52e153424d"} Nov 21 15:57:08 crc kubenswrapper[4967]: I1121 15:57:08.614657 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-59bbb4975-mxdmz" Nov 21 15:57:08 crc kubenswrapper[4967]: I1121 15:57:08.666849 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-59bbb4975-mxdmz" podStartSLOduration=3.666793 podStartE2EDuration="3.666793s" podCreationTimestamp="2025-11-21 15:57:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:57:08.652705703 +0000 UTC m=+1316.911226721" watchObservedRunningTime="2025-11-21 15:57:08.666793 +0000 UTC m=+1316.925314018" Nov 21 15:57:09 crc kubenswrapper[4967]: I1121 15:57:09.318380 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:57:09 crc kubenswrapper[4967]: I1121 15:57:09.319045 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerName="ceilometer-central-agent" containerID="cri-o://7be10394e739b35ce1e663f218abef8e4a2ddd5a257751c4cd52cce365cd4b2b" gracePeriod=30 Nov 21 15:57:09 crc kubenswrapper[4967]: I1121 15:57:09.319489 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerName="proxy-httpd" containerID="cri-o://020bef81047c8008782c5042de065aabb0805f186829f78f37a67754b692a572" gracePeriod=30 Nov 21 15:57:09 crc kubenswrapper[4967]: I1121 15:57:09.319636 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerName="sg-core" containerID="cri-o://5ff7134e6aee4fbea9effdf90993ee33422c1178643d8badd53a1301364d5b9b" gracePeriod=30 Nov 21 15:57:09 crc kubenswrapper[4967]: I1121 15:57:09.319703 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerName="ceilometer-notification-agent" containerID="cri-o://42cda58e8125226d31e745936b86915184a64bc5b49a6e9322d2439fcfc09688" gracePeriod=30 Nov 21 15:57:09 crc kubenswrapper[4967]: I1121 15:57:09.375045 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 21 15:57:09 crc kubenswrapper[4967]: I1121 15:57:09.626412 4967 generic.go:334] "Generic (PLEG): container finished" podID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerID="5ff7134e6aee4fbea9effdf90993ee33422c1178643d8badd53a1301364d5b9b" exitCode=2 Nov 21 15:57:09 crc kubenswrapper[4967]: I1121 15:57:09.626470 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"e5ec60e1-6164-417c-bd54-dba2488c532a","Type":"ContainerDied","Data":"5ff7134e6aee4fbea9effdf90993ee33422c1178643d8badd53a1301364d5b9b"} Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.492682 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-fdv92"] Nov 21 15:57:10 crc kubenswrapper[4967]: E1121 15:57:10.497774 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5527dd0f-b1df-4a90-846b-43dd6319bfa5" containerName="dnsmasq-dns" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.497812 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="5527dd0f-b1df-4a90-846b-43dd6319bfa5" containerName="dnsmasq-dns" Nov 21 15:57:10 crc kubenswrapper[4967]: E1121 15:57:10.497875 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5527dd0f-b1df-4a90-846b-43dd6319bfa5" containerName="init" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.497882 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="5527dd0f-b1df-4a90-846b-43dd6319bfa5" containerName="init" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.498226 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="5527dd0f-b1df-4a90-846b-43dd6319bfa5" containerName="dnsmasq-dns" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.499057 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-fdv92" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.515172 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-fdv92"] Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.581437 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sr2l8\" (UniqueName: \"kubernetes.io/projected/e2e282d2-51b3-46f2-9ce8-faa9ad9fec16-kube-api-access-sr2l8\") pod \"nova-api-db-create-fdv92\" (UID: \"e2e282d2-51b3-46f2-9ce8-faa9ad9fec16\") " pod="openstack/nova-api-db-create-fdv92" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.581651 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2e282d2-51b3-46f2-9ce8-faa9ad9fec16-operator-scripts\") pod \"nova-api-db-create-fdv92\" (UID: \"e2e282d2-51b3-46f2-9ce8-faa9ad9fec16\") " pod="openstack/nova-api-db-create-fdv92" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.581971 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-9c4e-account-create-kpr2p"] Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.583787 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-9c4e-account-create-kpr2p" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.587246 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.612587 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-wrdfr"] Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.614750 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-wrdfr" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.636752 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-9c4e-account-create-kpr2p"] Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.657102 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-wrdfr"] Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.673965 4967 generic.go:334] "Generic (PLEG): container finished" podID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerID="020bef81047c8008782c5042de065aabb0805f186829f78f37a67754b692a572" exitCode=0 Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.674000 4967 generic.go:334] "Generic (PLEG): container finished" podID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerID="42cda58e8125226d31e745936b86915184a64bc5b49a6e9322d2439fcfc09688" exitCode=0 Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.674032 4967 generic.go:334] "Generic (PLEG): container finished" podID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerID="7be10394e739b35ce1e663f218abef8e4a2ddd5a257751c4cd52cce365cd4b2b" exitCode=0 Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.674058 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e5ec60e1-6164-417c-bd54-dba2488c532a","Type":"ContainerDied","Data":"020bef81047c8008782c5042de065aabb0805f186829f78f37a67754b692a572"} Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.674086 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e5ec60e1-6164-417c-bd54-dba2488c532a","Type":"ContainerDied","Data":"42cda58e8125226d31e745936b86915184a64bc5b49a6e9322d2439fcfc09688"} Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.674096 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e5ec60e1-6164-417c-bd54-dba2488c532a","Type":"ContainerDied","Data":"7be10394e739b35ce1e663f218abef8e4a2ddd5a257751c4cd52cce365cd4b2b"} Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.684596 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2e282d2-51b3-46f2-9ce8-faa9ad9fec16-operator-scripts\") pod \"nova-api-db-create-fdv92\" (UID: \"e2e282d2-51b3-46f2-9ce8-faa9ad9fec16\") " pod="openstack/nova-api-db-create-fdv92" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.684659 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f57422a0-a226-4bd9-8dc7-ebfee76b5745-operator-scripts\") pod \"nova-api-9c4e-account-create-kpr2p\" (UID: \"f57422a0-a226-4bd9-8dc7-ebfee76b5745\") " pod="openstack/nova-api-9c4e-account-create-kpr2p" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.684959 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sr2l8\" (UniqueName: \"kubernetes.io/projected/e2e282d2-51b3-46f2-9ce8-faa9ad9fec16-kube-api-access-sr2l8\") pod \"nova-api-db-create-fdv92\" (UID: \"e2e282d2-51b3-46f2-9ce8-faa9ad9fec16\") " pod="openstack/nova-api-db-create-fdv92" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.685020 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2zjnp\" (UniqueName: \"kubernetes.io/projected/f57422a0-a226-4bd9-8dc7-ebfee76b5745-kube-api-access-2zjnp\") pod 
\"nova-api-9c4e-account-create-kpr2p\" (UID: \"f57422a0-a226-4bd9-8dc7-ebfee76b5745\") " pod="openstack/nova-api-9c4e-account-create-kpr2p" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.685131 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8fe6e979-4546-4f1f-8c36-d57c3bb578bf-operator-scripts\") pod \"nova-cell0-db-create-wrdfr\" (UID: \"8fe6e979-4546-4f1f-8c36-d57c3bb578bf\") " pod="openstack/nova-cell0-db-create-wrdfr" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.685225 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k99fh\" (UniqueName: \"kubernetes.io/projected/8fe6e979-4546-4f1f-8c36-d57c3bb578bf-kube-api-access-k99fh\") pod \"nova-cell0-db-create-wrdfr\" (UID: \"8fe6e979-4546-4f1f-8c36-d57c3bb578bf\") " pod="openstack/nova-cell0-db-create-wrdfr" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.686236 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2e282d2-51b3-46f2-9ce8-faa9ad9fec16-operator-scripts\") pod \"nova-api-db-create-fdv92\" (UID: \"e2e282d2-51b3-46f2-9ce8-faa9ad9fec16\") " pod="openstack/nova-api-db-create-fdv92" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.700859 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-xlhnx"] Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.702599 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-xlhnx" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.711215 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-xlhnx"] Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.721434 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sr2l8\" (UniqueName: \"kubernetes.io/projected/e2e282d2-51b3-46f2-9ce8-faa9ad9fec16-kube-api-access-sr2l8\") pod \"nova-api-db-create-fdv92\" (UID: \"e2e282d2-51b3-46f2-9ce8-faa9ad9fec16\") " pod="openstack/nova-api-db-create-fdv92" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.788124 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k99fh\" (UniqueName: \"kubernetes.io/projected/8fe6e979-4546-4f1f-8c36-d57c3bb578bf-kube-api-access-k99fh\") pod \"nova-cell0-db-create-wrdfr\" (UID: \"8fe6e979-4546-4f1f-8c36-d57c3bb578bf\") " pod="openstack/nova-cell0-db-create-wrdfr" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.788482 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vt67g\" (UniqueName: \"kubernetes.io/projected/22f15a48-c17e-477d-90d1-ea57d28f1457-kube-api-access-vt67g\") pod \"nova-cell1-db-create-xlhnx\" (UID: \"22f15a48-c17e-477d-90d1-ea57d28f1457\") " pod="openstack/nova-cell1-db-create-xlhnx" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.788515 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f57422a0-a226-4bd9-8dc7-ebfee76b5745-operator-scripts\") pod \"nova-api-9c4e-account-create-kpr2p\" (UID: \"f57422a0-a226-4bd9-8dc7-ebfee76b5745\") " pod="openstack/nova-api-9c4e-account-create-kpr2p" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.788611 4967 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22f15a48-c17e-477d-90d1-ea57d28f1457-operator-scripts\") pod \"nova-cell1-db-create-xlhnx\" (UID: \"22f15a48-c17e-477d-90d1-ea57d28f1457\") " pod="openstack/nova-cell1-db-create-xlhnx" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.788695 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2zjnp\" (UniqueName: \"kubernetes.io/projected/f57422a0-a226-4bd9-8dc7-ebfee76b5745-kube-api-access-2zjnp\") pod \"nova-api-9c4e-account-create-kpr2p\" (UID: \"f57422a0-a226-4bd9-8dc7-ebfee76b5745\") " pod="openstack/nova-api-9c4e-account-create-kpr2p" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.788742 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8fe6e979-4546-4f1f-8c36-d57c3bb578bf-operator-scripts\") pod \"nova-cell0-db-create-wrdfr\" (UID: \"8fe6e979-4546-4f1f-8c36-d57c3bb578bf\") " pod="openstack/nova-cell0-db-create-wrdfr" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.790816 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f57422a0-a226-4bd9-8dc7-ebfee76b5745-operator-scripts\") pod \"nova-api-9c4e-account-create-kpr2p\" (UID: \"f57422a0-a226-4bd9-8dc7-ebfee76b5745\") " pod="openstack/nova-api-9c4e-account-create-kpr2p" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.790832 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8fe6e979-4546-4f1f-8c36-d57c3bb578bf-operator-scripts\") pod \"nova-cell0-db-create-wrdfr\" (UID: \"8fe6e979-4546-4f1f-8c36-d57c3bb578bf\") " pod="openstack/nova-cell0-db-create-wrdfr" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.796534 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-7b9f-account-create-ccd9f"] Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.799160 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-7b9f-account-create-ccd9f" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.803271 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.810263 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k99fh\" (UniqueName: \"kubernetes.io/projected/8fe6e979-4546-4f1f-8c36-d57c3bb578bf-kube-api-access-k99fh\") pod \"nova-cell0-db-create-wrdfr\" (UID: \"8fe6e979-4546-4f1f-8c36-d57c3bb578bf\") " pod="openstack/nova-cell0-db-create-wrdfr" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.820339 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-7b9f-account-create-ccd9f"] Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.824650 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-fdv92" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.825153 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2zjnp\" (UniqueName: \"kubernetes.io/projected/f57422a0-a226-4bd9-8dc7-ebfee76b5745-kube-api-access-2zjnp\") pod \"nova-api-9c4e-account-create-kpr2p\" (UID: \"f57422a0-a226-4bd9-8dc7-ebfee76b5745\") " pod="openstack/nova-api-9c4e-account-create-kpr2p" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.890962 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22f15a48-c17e-477d-90d1-ea57d28f1457-operator-scripts\") pod \"nova-cell1-db-create-xlhnx\" (UID: \"22f15a48-c17e-477d-90d1-ea57d28f1457\") " pod="openstack/nova-cell1-db-create-xlhnx" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.891134 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vt67g\" (UniqueName: \"kubernetes.io/projected/22f15a48-c17e-477d-90d1-ea57d28f1457-kube-api-access-vt67g\") pod \"nova-cell1-db-create-xlhnx\" (UID: \"22f15a48-c17e-477d-90d1-ea57d28f1457\") " pod="openstack/nova-cell1-db-create-xlhnx" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.892172 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22f15a48-c17e-477d-90d1-ea57d28f1457-operator-scripts\") pod \"nova-cell1-db-create-xlhnx\" (UID: \"22f15a48-c17e-477d-90d1-ea57d28f1457\") " pod="openstack/nova-cell1-db-create-xlhnx" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.907198 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vt67g\" (UniqueName: \"kubernetes.io/projected/22f15a48-c17e-477d-90d1-ea57d28f1457-kube-api-access-vt67g\") pod \"nova-cell1-db-create-xlhnx\" (UID: \"22f15a48-c17e-477d-90d1-ea57d28f1457\") " pod="openstack/nova-cell1-db-create-xlhnx" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.911436 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-9c4e-account-create-kpr2p" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.945494 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-wrdfr" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.993377 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-2dfc-account-create-c7vxv"] Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.995209 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-2dfc-account-create-c7vxv" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.995926 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtqb9\" (UniqueName: \"kubernetes.io/projected/7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761-kube-api-access-gtqb9\") pod \"nova-cell0-7b9f-account-create-ccd9f\" (UID: \"7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761\") " pod="openstack/nova-cell0-7b9f-account-create-ccd9f" Nov 21 15:57:10 crc kubenswrapper[4967]: I1121 15:57:10.995986 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761-operator-scripts\") pod \"nova-cell0-7b9f-account-create-ccd9f\" (UID: \"7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761\") " pod="openstack/nova-cell0-7b9f-account-create-ccd9f" Nov 21 15:57:11 crc kubenswrapper[4967]: I1121 15:57:11.000602 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 21 15:57:11 crc kubenswrapper[4967]: I1121 15:57:11.009656 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-2dfc-account-create-c7vxv"] Nov 21 15:57:11 crc kubenswrapper[4967]: I1121 15:57:11.092998 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-xlhnx" Nov 21 15:57:11 crc kubenswrapper[4967]: I1121 15:57:11.099580 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42d85\" (UniqueName: \"kubernetes.io/projected/f8fd0d7f-cf04-4fef-8825-4a5f82a76e22-kube-api-access-42d85\") pod \"nova-cell1-2dfc-account-create-c7vxv\" (UID: \"f8fd0d7f-cf04-4fef-8825-4a5f82a76e22\") " pod="openstack/nova-cell1-2dfc-account-create-c7vxv" Nov 21 15:57:11 crc kubenswrapper[4967]: I1121 15:57:11.099678 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtqb9\" (UniqueName: \"kubernetes.io/projected/7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761-kube-api-access-gtqb9\") pod \"nova-cell0-7b9f-account-create-ccd9f\" (UID: \"7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761\") " pod="openstack/nova-cell0-7b9f-account-create-ccd9f" Nov 21 15:57:11 crc kubenswrapper[4967]: I1121 15:57:11.099768 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761-operator-scripts\") pod \"nova-cell0-7b9f-account-create-ccd9f\" (UID: \"7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761\") " pod="openstack/nova-cell0-7b9f-account-create-ccd9f" Nov 21 15:57:11 crc kubenswrapper[4967]: I1121 15:57:11.099862 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8fd0d7f-cf04-4fef-8825-4a5f82a76e22-operator-scripts\") pod \"nova-cell1-2dfc-account-create-c7vxv\" (UID: \"f8fd0d7f-cf04-4fef-8825-4a5f82a76e22\") " pod="openstack/nova-cell1-2dfc-account-create-c7vxv" Nov 21 15:57:11 crc kubenswrapper[4967]: I1121 15:57:11.100546 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761-operator-scripts\") pod \"nova-cell0-7b9f-account-create-ccd9f\" (UID: \"7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761\") " 
pod="openstack/nova-cell0-7b9f-account-create-ccd9f" Nov 21 15:57:11 crc kubenswrapper[4967]: I1121 15:57:11.118057 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtqb9\" (UniqueName: \"kubernetes.io/projected/7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761-kube-api-access-gtqb9\") pod \"nova-cell0-7b9f-account-create-ccd9f\" (UID: \"7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761\") " pod="openstack/nova-cell0-7b9f-account-create-ccd9f" Nov 21 15:57:11 crc kubenswrapper[4967]: I1121 15:57:11.201952 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42d85\" (UniqueName: \"kubernetes.io/projected/f8fd0d7f-cf04-4fef-8825-4a5f82a76e22-kube-api-access-42d85\") pod \"nova-cell1-2dfc-account-create-c7vxv\" (UID: \"f8fd0d7f-cf04-4fef-8825-4a5f82a76e22\") " pod="openstack/nova-cell1-2dfc-account-create-c7vxv" Nov 21 15:57:11 crc kubenswrapper[4967]: I1121 15:57:11.202378 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8fd0d7f-cf04-4fef-8825-4a5f82a76e22-operator-scripts\") pod \"nova-cell1-2dfc-account-create-c7vxv\" (UID: \"f8fd0d7f-cf04-4fef-8825-4a5f82a76e22\") " pod="openstack/nova-cell1-2dfc-account-create-c7vxv" Nov 21 15:57:11 crc kubenswrapper[4967]: I1121 15:57:11.203438 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8fd0d7f-cf04-4fef-8825-4a5f82a76e22-operator-scripts\") pod \"nova-cell1-2dfc-account-create-c7vxv\" (UID: \"f8fd0d7f-cf04-4fef-8825-4a5f82a76e22\") " pod="openstack/nova-cell1-2dfc-account-create-c7vxv" Nov 21 15:57:11 crc kubenswrapper[4967]: I1121 15:57:11.221862 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42d85\" (UniqueName: \"kubernetes.io/projected/f8fd0d7f-cf04-4fef-8825-4a5f82a76e22-kube-api-access-42d85\") pod \"nova-cell1-2dfc-account-create-c7vxv\" (UID: \"f8fd0d7f-cf04-4fef-8825-4a5f82a76e22\") " pod="openstack/nova-cell1-2dfc-account-create-c7vxv" Nov 21 15:57:11 crc kubenswrapper[4967]: I1121 15:57:11.267892 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-7b9f-account-create-ccd9f" Nov 21 15:57:11 crc kubenswrapper[4967]: I1121 15:57:11.339142 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-2dfc-account-create-c7vxv" Nov 21 15:57:12 crc kubenswrapper[4967]: I1121 15:57:12.442348 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 15:57:12 crc kubenswrapper[4967]: I1121 15:57:12.442712 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="092d2168-5e3a-4967-a0b7-2f4b85a90487" containerName="glance-log" containerID="cri-o://5b9bca81bb29bbba4965712b67772e9516b0de09b6b7075258575b7c8f627668" gracePeriod=30 Nov 21 15:57:12 crc kubenswrapper[4967]: I1121 15:57:12.442762 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="092d2168-5e3a-4967-a0b7-2f4b85a90487" containerName="glance-httpd" containerID="cri-o://874e5b6d9db34d44f43a3bd2057069b1e10198bf5cf524250d237bf0ce9ce8d7" gracePeriod=30 Nov 21 15:57:12 crc kubenswrapper[4967]: I1121 15:57:12.702295 4967 generic.go:334] "Generic (PLEG): container finished" podID="092d2168-5e3a-4967-a0b7-2f4b85a90487" containerID="5b9bca81bb29bbba4965712b67772e9516b0de09b6b7075258575b7c8f627668" exitCode=143 Nov 21 15:57:12 crc kubenswrapper[4967]: I1121 15:57:12.702361 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"092d2168-5e3a-4967-a0b7-2f4b85a90487","Type":"ContainerDied","Data":"5b9bca81bb29bbba4965712b67772e9516b0de09b6b7075258575b7c8f627668"} Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.309886 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-cf9748ff4-bql4m"] Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.313861 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-cf9748ff4-bql4m" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.398744 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-9b44d55dc-pdsms"] Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.403057 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-9b44d55dc-pdsms" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.430006 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-657885494d-vrfht"] Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.439908 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-657885494d-vrfht" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.450994 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-cf9748ff4-bql4m"] Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.459910 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93184620-a042-499a-bb5b-3d8719a73436-config-data\") pod \"heat-engine-cf9748ff4-bql4m\" (UID: \"93184620-a042-499a-bb5b-3d8719a73436\") " pod="openstack/heat-engine-cf9748ff4-bql4m" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.464515 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-9b44d55dc-pdsms"] Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.465755 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93184620-a042-499a-bb5b-3d8719a73436-config-data-custom\") pod \"heat-engine-cf9748ff4-bql4m\" (UID: \"93184620-a042-499a-bb5b-3d8719a73436\") " pod="openstack/heat-engine-cf9748ff4-bql4m" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.466038 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhwwz\" (UniqueName: \"kubernetes.io/projected/93184620-a042-499a-bb5b-3d8719a73436-kube-api-access-rhwwz\") pod \"heat-engine-cf9748ff4-bql4m\" (UID: \"93184620-a042-499a-bb5b-3d8719a73436\") " pod="openstack/heat-engine-cf9748ff4-bql4m" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.466322 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93184620-a042-499a-bb5b-3d8719a73436-combined-ca-bundle\") pod \"heat-engine-cf9748ff4-bql4m\" (UID: \"93184620-a042-499a-bb5b-3d8719a73436\") " pod="openstack/heat-engine-cf9748ff4-bql4m" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.483113 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-657885494d-vrfht"] Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.567957 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-config-data-custom\") pod \"heat-api-9b44d55dc-pdsms\" (UID: \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\") " pod="openstack/heat-api-9b44d55dc-pdsms" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.568045 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfc30aad-8b31-4143-a494-fb327041e699-config-data\") pod \"heat-cfnapi-657885494d-vrfht\" (UID: \"dfc30aad-8b31-4143-a494-fb327041e699\") " pod="openstack/heat-cfnapi-657885494d-vrfht" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.568073 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhwwz\" (UniqueName: \"kubernetes.io/projected/93184620-a042-499a-bb5b-3d8719a73436-kube-api-access-rhwwz\") pod \"heat-engine-cf9748ff4-bql4m\" (UID: \"93184620-a042-499a-bb5b-3d8719a73436\") " pod="openstack/heat-engine-cf9748ff4-bql4m" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.568126 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93184620-a042-499a-bb5b-3d8719a73436-combined-ca-bundle\") pod \"heat-engine-cf9748ff4-bql4m\" (UID: \"93184620-a042-499a-bb5b-3d8719a73436\") " pod="openstack/heat-engine-cf9748ff4-bql4m" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.568151 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-combined-ca-bundle\") pod \"heat-api-9b44d55dc-pdsms\" (UID: \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\") " pod="openstack/heat-api-9b44d55dc-pdsms" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.568194 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9nhjh\" (UniqueName: \"kubernetes.io/projected/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-kube-api-access-9nhjh\") pod \"heat-api-9b44d55dc-pdsms\" (UID: \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\") " pod="openstack/heat-api-9b44d55dc-pdsms" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.568223 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93184620-a042-499a-bb5b-3d8719a73436-config-data\") pod \"heat-engine-cf9748ff4-bql4m\" (UID: \"93184620-a042-499a-bb5b-3d8719a73436\") " pod="openstack/heat-engine-cf9748ff4-bql4m" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.568245 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfc30aad-8b31-4143-a494-fb327041e699-combined-ca-bundle\") pod \"heat-cfnapi-657885494d-vrfht\" (UID: \"dfc30aad-8b31-4143-a494-fb327041e699\") " pod="openstack/heat-cfnapi-657885494d-vrfht" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.568684 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dfc30aad-8b31-4143-a494-fb327041e699-config-data-custom\") pod \"heat-cfnapi-657885494d-vrfht\" (UID: \"dfc30aad-8b31-4143-a494-fb327041e699\") " pod="openstack/heat-cfnapi-657885494d-vrfht" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.568794 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8lw8\" (UniqueName: \"kubernetes.io/projected/dfc30aad-8b31-4143-a494-fb327041e699-kube-api-access-k8lw8\") pod \"heat-cfnapi-657885494d-vrfht\" (UID: \"dfc30aad-8b31-4143-a494-fb327041e699\") " pod="openstack/heat-cfnapi-657885494d-vrfht" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.568844 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93184620-a042-499a-bb5b-3d8719a73436-config-data-custom\") pod \"heat-engine-cf9748ff4-bql4m\" (UID: \"93184620-a042-499a-bb5b-3d8719a73436\") " pod="openstack/heat-engine-cf9748ff4-bql4m" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.568894 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-config-data\") pod \"heat-api-9b44d55dc-pdsms\" (UID: \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\") " pod="openstack/heat-api-9b44d55dc-pdsms" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.577511 4967 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93184620-a042-499a-bb5b-3d8719a73436-config-data-custom\") pod \"heat-engine-cf9748ff4-bql4m\" (UID: \"93184620-a042-499a-bb5b-3d8719a73436\") " pod="openstack/heat-engine-cf9748ff4-bql4m" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.579236 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93184620-a042-499a-bb5b-3d8719a73436-combined-ca-bundle\") pod \"heat-engine-cf9748ff4-bql4m\" (UID: \"93184620-a042-499a-bb5b-3d8719a73436\") " pod="openstack/heat-engine-cf9748ff4-bql4m" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.582965 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93184620-a042-499a-bb5b-3d8719a73436-config-data\") pod \"heat-engine-cf9748ff4-bql4m\" (UID: \"93184620-a042-499a-bb5b-3d8719a73436\") " pod="openstack/heat-engine-cf9748ff4-bql4m" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.587450 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhwwz\" (UniqueName: \"kubernetes.io/projected/93184620-a042-499a-bb5b-3d8719a73436-kube-api-access-rhwwz\") pod \"heat-engine-cf9748ff4-bql4m\" (UID: \"93184620-a042-499a-bb5b-3d8719a73436\") " pod="openstack/heat-engine-cf9748ff4-bql4m" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.653325 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-cf9748ff4-bql4m" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.670281 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dfc30aad-8b31-4143-a494-fb327041e699-config-data-custom\") pod \"heat-cfnapi-657885494d-vrfht\" (UID: \"dfc30aad-8b31-4143-a494-fb327041e699\") " pod="openstack/heat-cfnapi-657885494d-vrfht" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.670374 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8lw8\" (UniqueName: \"kubernetes.io/projected/dfc30aad-8b31-4143-a494-fb327041e699-kube-api-access-k8lw8\") pod \"heat-cfnapi-657885494d-vrfht\" (UID: \"dfc30aad-8b31-4143-a494-fb327041e699\") " pod="openstack/heat-cfnapi-657885494d-vrfht" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.670419 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-config-data\") pod \"heat-api-9b44d55dc-pdsms\" (UID: \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\") " pod="openstack/heat-api-9b44d55dc-pdsms" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.670489 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-config-data-custom\") pod \"heat-api-9b44d55dc-pdsms\" (UID: \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\") " pod="openstack/heat-api-9b44d55dc-pdsms" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.670524 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfc30aad-8b31-4143-a494-fb327041e699-config-data\") pod \"heat-cfnapi-657885494d-vrfht\" (UID: \"dfc30aad-8b31-4143-a494-fb327041e699\") " 
pod="openstack/heat-cfnapi-657885494d-vrfht" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.670576 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-combined-ca-bundle\") pod \"heat-api-9b44d55dc-pdsms\" (UID: \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\") " pod="openstack/heat-api-9b44d55dc-pdsms" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.670618 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9nhjh\" (UniqueName: \"kubernetes.io/projected/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-kube-api-access-9nhjh\") pod \"heat-api-9b44d55dc-pdsms\" (UID: \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\") " pod="openstack/heat-api-9b44d55dc-pdsms" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.670647 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfc30aad-8b31-4143-a494-fb327041e699-combined-ca-bundle\") pod \"heat-cfnapi-657885494d-vrfht\" (UID: \"dfc30aad-8b31-4143-a494-fb327041e699\") " pod="openstack/heat-cfnapi-657885494d-vrfht" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.676582 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfc30aad-8b31-4143-a494-fb327041e699-config-data\") pod \"heat-cfnapi-657885494d-vrfht\" (UID: \"dfc30aad-8b31-4143-a494-fb327041e699\") " pod="openstack/heat-cfnapi-657885494d-vrfht" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.678205 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-config-data-custom\") pod \"heat-api-9b44d55dc-pdsms\" (UID: \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\") " pod="openstack/heat-api-9b44d55dc-pdsms" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.678297 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-combined-ca-bundle\") pod \"heat-api-9b44d55dc-pdsms\" (UID: \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\") " pod="openstack/heat-api-9b44d55dc-pdsms" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.679576 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-config-data\") pod \"heat-api-9b44d55dc-pdsms\" (UID: \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\") " pod="openstack/heat-api-9b44d55dc-pdsms" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.679707 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dfc30aad-8b31-4143-a494-fb327041e699-config-data-custom\") pod \"heat-cfnapi-657885494d-vrfht\" (UID: \"dfc30aad-8b31-4143-a494-fb327041e699\") " pod="openstack/heat-cfnapi-657885494d-vrfht" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.680946 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfc30aad-8b31-4143-a494-fb327041e699-combined-ca-bundle\") pod \"heat-cfnapi-657885494d-vrfht\" (UID: \"dfc30aad-8b31-4143-a494-fb327041e699\") " pod="openstack/heat-cfnapi-657885494d-vrfht" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.686810 4967 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8lw8\" (UniqueName: \"kubernetes.io/projected/dfc30aad-8b31-4143-a494-fb327041e699-kube-api-access-k8lw8\") pod \"heat-cfnapi-657885494d-vrfht\" (UID: \"dfc30aad-8b31-4143-a494-fb327041e699\") " pod="openstack/heat-cfnapi-657885494d-vrfht" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.693162 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9nhjh\" (UniqueName: \"kubernetes.io/projected/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-kube-api-access-9nhjh\") pod \"heat-api-9b44d55dc-pdsms\" (UID: \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\") " pod="openstack/heat-api-9b44d55dc-pdsms" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.739996 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-9b44d55dc-pdsms" Nov 21 15:57:13 crc kubenswrapper[4967]: I1121 15:57:13.765738 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-657885494d-vrfht" Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.316817 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.200:3000/\": dial tcp 10.217.0.200:3000: connect: connection refused" Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.512514 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.512869 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f" containerName="glance-log" containerID="cri-o://b2a9008ab5ab53fda96ffa0e2ded04125689e319f061812d7e5bd689a69dbf4a" gracePeriod=30 Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.512878 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f" containerName="glance-httpd" containerID="cri-o://aaf7de92700f13b43c6dda63553c636e3e8a02bb1b2dc4cef73764f54d4ba038" gracePeriod=30 Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.712380 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-65548fddc5-76rgx"] Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.728568 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-54db5b96bc-8xjk7"] Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.743610 4967 generic.go:334] "Generic (PLEG): container finished" podID="7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f" containerID="b2a9008ab5ab53fda96ffa0e2ded04125689e319f061812d7e5bd689a69dbf4a" exitCode=143 Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.743653 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f","Type":"ContainerDied","Data":"b2a9008ab5ab53fda96ffa0e2ded04125689e319f061812d7e5bd689a69dbf4a"} Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.790038 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-f74c7fcfc-6jdr9"] Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.791829 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.799847 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.808964 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.817398 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-7d484d94c7-dntt2"] Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.818990 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.832341 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-f74c7fcfc-6jdr9"] Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.849638 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.849713 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.872172 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7d484d94c7-dntt2"] Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.901748 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-config-data\") pod \"heat-api-f74c7fcfc-6jdr9\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.901837 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnz8q\" (UniqueName: \"kubernetes.io/projected/fba7e61c-eb55-4772-9904-7e6ae77ec941-kube-api-access-mnz8q\") pod \"heat-api-f74c7fcfc-6jdr9\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.901997 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-combined-ca-bundle\") pod \"heat-api-f74c7fcfc-6jdr9\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.902187 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-config-data-custom\") pod \"heat-api-f74c7fcfc-6jdr9\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.902267 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-internal-tls-certs\") pod \"heat-api-f74c7fcfc-6jdr9\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:14 crc kubenswrapper[4967]: I1121 15:57:14.902471 4967 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-public-tls-certs\") pod \"heat-api-f74c7fcfc-6jdr9\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.006760 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnz8q\" (UniqueName: \"kubernetes.io/projected/fba7e61c-eb55-4772-9904-7e6ae77ec941-kube-api-access-mnz8q\") pod \"heat-api-f74c7fcfc-6jdr9\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.006815 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-combined-ca-bundle\") pod \"heat-api-f74c7fcfc-6jdr9\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.006883 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-config-data-custom\") pod \"heat-api-f74c7fcfc-6jdr9\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.006916 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kg8cd\" (UniqueName: \"kubernetes.io/projected/c53783e2-ed84-49c6-b688-9c6603a6c3b1-kube-api-access-kg8cd\") pod \"heat-cfnapi-7d484d94c7-dntt2\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") " pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.006941 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-internal-tls-certs\") pod \"heat-api-f74c7fcfc-6jdr9\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.006983 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-combined-ca-bundle\") pod \"heat-cfnapi-7d484d94c7-dntt2\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") " pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.007010 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-public-tls-certs\") pod \"heat-api-f74c7fcfc-6jdr9\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.007076 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-internal-tls-certs\") pod \"heat-cfnapi-7d484d94c7-dntt2\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") " pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.007128 
4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-config-data\") pod \"heat-api-f74c7fcfc-6jdr9\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.007161 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-public-tls-certs\") pod \"heat-cfnapi-7d484d94c7-dntt2\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") " pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.007180 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-config-data\") pod \"heat-cfnapi-7d484d94c7-dntt2\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") " pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.007196 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-config-data-custom\") pod \"heat-cfnapi-7d484d94c7-dntt2\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") " pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.013550 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-config-data\") pod \"heat-api-f74c7fcfc-6jdr9\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.014022 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-combined-ca-bundle\") pod \"heat-api-f74c7fcfc-6jdr9\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.016338 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-config-data-custom\") pod \"heat-api-f74c7fcfc-6jdr9\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.017012 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-public-tls-certs\") pod \"heat-api-f74c7fcfc-6jdr9\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.021902 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-internal-tls-certs\") pod \"heat-api-f74c7fcfc-6jdr9\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.027359 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-mnz8q\" (UniqueName: \"kubernetes.io/projected/fba7e61c-eb55-4772-9904-7e6ae77ec941-kube-api-access-mnz8q\") pod \"heat-api-f74c7fcfc-6jdr9\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.109459 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kg8cd\" (UniqueName: \"kubernetes.io/projected/c53783e2-ed84-49c6-b688-9c6603a6c3b1-kube-api-access-kg8cd\") pod \"heat-cfnapi-7d484d94c7-dntt2\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") " pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.109575 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-combined-ca-bundle\") pod \"heat-cfnapi-7d484d94c7-dntt2\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") " pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.109746 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-internal-tls-certs\") pod \"heat-cfnapi-7d484d94c7-dntt2\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") " pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.109878 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-public-tls-certs\") pod \"heat-cfnapi-7d484d94c7-dntt2\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") " pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.109915 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-config-data\") pod \"heat-cfnapi-7d484d94c7-dntt2\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") " pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.109946 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-config-data-custom\") pod \"heat-cfnapi-7d484d94c7-dntt2\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") " pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.114254 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-config-data-custom\") pod \"heat-cfnapi-7d484d94c7-dntt2\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") " pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.114516 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-internal-tls-certs\") pod \"heat-cfnapi-7d484d94c7-dntt2\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") " pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.114859 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.114859 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-config-data\") pod \"heat-cfnapi-7d484d94c7-dntt2\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") " pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.119791 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-combined-ca-bundle\") pod \"heat-cfnapi-7d484d94c7-dntt2\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") " pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.120771 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-public-tls-certs\") pod \"heat-cfnapi-7d484d94c7-dntt2\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") " pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.128625 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kg8cd\" (UniqueName: \"kubernetes.io/projected/c53783e2-ed84-49c6-b688-9c6603a6c3b1-kube-api-access-kg8cd\") pod \"heat-cfnapi-7d484d94c7-dntt2\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") " pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.151462 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.356515 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.359762 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-794fb7d789-mkxk2" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.624937 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="092d2168-5e3a-4967-a0b7-2f4b85a90487" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.188:9292/healthcheck\": read tcp 10.217.0.2:44448->10.217.0.188:9292: read: connection reset by peer" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.625077 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="092d2168-5e3a-4967-a0b7-2f4b85a90487" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.188:9292/healthcheck\": read tcp 10.217.0.2:44438->10.217.0.188:9292: read: connection reset by peer" Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.764087 4967 generic.go:334] "Generic (PLEG): container finished" podID="092d2168-5e3a-4967-a0b7-2f4b85a90487" containerID="874e5b6d9db34d44f43a3bd2057069b1e10198bf5cf524250d237bf0ce9ce8d7" exitCode=0 Nov 21 15:57:15 crc kubenswrapper[4967]: I1121 15:57:15.764201 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"092d2168-5e3a-4967-a0b7-2f4b85a90487","Type":"ContainerDied","Data":"874e5b6d9db34d44f43a3bd2057069b1e10198bf5cf524250d237bf0ce9ce8d7"} Nov 21 15:57:17 crc kubenswrapper[4967]: I1121 
15:57:17.806904 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"092d2168-5e3a-4967-a0b7-2f4b85a90487","Type":"ContainerDied","Data":"a274682d609fa8944c0fc3a36919e4b194c6a57944325e0ff6b3bf96ea73d497"} Nov 21 15:57:17 crc kubenswrapper[4967]: I1121 15:57:17.807326 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a274682d609fa8944c0fc3a36919e4b194c6a57944325e0ff6b3bf96ea73d497" Nov 21 15:57:17 crc kubenswrapper[4967]: I1121 15:57:17.814629 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e5ec60e1-6164-417c-bd54-dba2488c532a","Type":"ContainerDied","Data":"fc6060a83782e39b40ef1baa5d556eece025fe0063f51f2f6d9f6b718e77ae97"} Nov 21 15:57:17 crc kubenswrapper[4967]: I1121 15:57:17.814686 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fc6060a83782e39b40ef1baa5d556eece025fe0063f51f2f6d9f6b718e77ae97" Nov 21 15:57:17 crc kubenswrapper[4967]: I1121 15:57:17.868928 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:57:17 crc kubenswrapper[4967]: I1121 15:57:17.905906 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 15:57:17 crc kubenswrapper[4967]: I1121 15:57:17.996130 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-config-data\") pod \"e5ec60e1-6164-417c-bd54-dba2488c532a\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " Nov 21 15:57:17 crc kubenswrapper[4967]: I1121 15:57:17.996341 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-sg-core-conf-yaml\") pod \"e5ec60e1-6164-417c-bd54-dba2488c532a\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " Nov 21 15:57:17 crc kubenswrapper[4967]: I1121 15:57:17.996574 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-combined-ca-bundle\") pod \"e5ec60e1-6164-417c-bd54-dba2488c532a\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " Nov 21 15:57:17 crc kubenswrapper[4967]: I1121 15:57:17.996665 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-scripts\") pod \"e5ec60e1-6164-417c-bd54-dba2488c532a\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " Nov 21 15:57:17 crc kubenswrapper[4967]: I1121 15:57:17.996895 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e5ec60e1-6164-417c-bd54-dba2488c532a-run-httpd\") pod \"e5ec60e1-6164-417c-bd54-dba2488c532a\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " Nov 21 15:57:17 crc kubenswrapper[4967]: I1121 15:57:17.996940 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cr9rq\" (UniqueName: \"kubernetes.io/projected/e5ec60e1-6164-417c-bd54-dba2488c532a-kube-api-access-cr9rq\") pod \"e5ec60e1-6164-417c-bd54-dba2488c532a\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " Nov 21 15:57:17 crc kubenswrapper[4967]: I1121 15:57:17.997097 4967 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e5ec60e1-6164-417c-bd54-dba2488c532a-log-httpd\") pod \"e5ec60e1-6164-417c-bd54-dba2488c532a\" (UID: \"e5ec60e1-6164-417c-bd54-dba2488c532a\") " Nov 21 15:57:17 crc kubenswrapper[4967]: I1121 15:57:17.997605 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5ec60e1-6164-417c-bd54-dba2488c532a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e5ec60e1-6164-417c-bd54-dba2488c532a" (UID: "e5ec60e1-6164-417c-bd54-dba2488c532a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:57:17 crc kubenswrapper[4967]: I1121 15:57:17.998032 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5ec60e1-6164-417c-bd54-dba2488c532a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e5ec60e1-6164-417c-bd54-dba2488c532a" (UID: "e5ec60e1-6164-417c-bd54-dba2488c532a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:57:17 crc kubenswrapper[4967]: I1121 15:57:17.999286 4967 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e5ec60e1-6164-417c-bd54-dba2488c532a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:17 crc kubenswrapper[4967]: I1121 15:57:17.999325 4967 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e5ec60e1-6164-417c-bd54-dba2488c532a-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.009969 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-scripts" (OuterVolumeSpecName: "scripts") pod "e5ec60e1-6164-417c-bd54-dba2488c532a" (UID: "e5ec60e1-6164-417c-bd54-dba2488c532a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.012823 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5ec60e1-6164-417c-bd54-dba2488c532a-kube-api-access-cr9rq" (OuterVolumeSpecName: "kube-api-access-cr9rq") pod "e5ec60e1-6164-417c-bd54-dba2488c532a" (UID: "e5ec60e1-6164-417c-bd54-dba2488c532a"). InnerVolumeSpecName "kube-api-access-cr9rq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.102462 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-combined-ca-bundle\") pod \"092d2168-5e3a-4967-a0b7-2f4b85a90487\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.102585 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-scripts\") pod \"092d2168-5e3a-4967-a0b7-2f4b85a90487\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.102715 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-public-tls-certs\") pod \"092d2168-5e3a-4967-a0b7-2f4b85a90487\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.102838 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/092d2168-5e3a-4967-a0b7-2f4b85a90487-logs\") pod \"092d2168-5e3a-4967-a0b7-2f4b85a90487\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.102872 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"092d2168-5e3a-4967-a0b7-2f4b85a90487\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.102902 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/092d2168-5e3a-4967-a0b7-2f4b85a90487-httpd-run\") pod \"092d2168-5e3a-4967-a0b7-2f4b85a90487\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.102954 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-config-data\") pod \"092d2168-5e3a-4967-a0b7-2f4b85a90487\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.103034 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n6tls\" (UniqueName: \"kubernetes.io/projected/092d2168-5e3a-4967-a0b7-2f4b85a90487-kube-api-access-n6tls\") pod \"092d2168-5e3a-4967-a0b7-2f4b85a90487\" (UID: \"092d2168-5e3a-4967-a0b7-2f4b85a90487\") " Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.106259 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/092d2168-5e3a-4967-a0b7-2f4b85a90487-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "092d2168-5e3a-4967-a0b7-2f4b85a90487" (UID: "092d2168-5e3a-4967-a0b7-2f4b85a90487"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.107385 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cr9rq\" (UniqueName: \"kubernetes.io/projected/e5ec60e1-6164-417c-bd54-dba2488c532a-kube-api-access-cr9rq\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.107423 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.107753 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/092d2168-5e3a-4967-a0b7-2f4b85a90487-logs" (OuterVolumeSpecName: "logs") pod "092d2168-5e3a-4967-a0b7-2f4b85a90487" (UID: "092d2168-5e3a-4967-a0b7-2f4b85a90487"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.154115 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-scripts" (OuterVolumeSpecName: "scripts") pod "092d2168-5e3a-4967-a0b7-2f4b85a90487" (UID: "092d2168-5e3a-4967-a0b7-2f4b85a90487"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.154301 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "092d2168-5e3a-4967-a0b7-2f4b85a90487" (UID: "092d2168-5e3a-4967-a0b7-2f4b85a90487"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.172472 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/092d2168-5e3a-4967-a0b7-2f4b85a90487-kube-api-access-n6tls" (OuterVolumeSpecName: "kube-api-access-n6tls") pod "092d2168-5e3a-4967-a0b7-2f4b85a90487" (UID: "092d2168-5e3a-4967-a0b7-2f4b85a90487"). InnerVolumeSpecName "kube-api-access-n6tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.213420 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n6tls\" (UniqueName: \"kubernetes.io/projected/092d2168-5e3a-4967-a0b7-2f4b85a90487-kube-api-access-n6tls\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.213476 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.213487 4967 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/092d2168-5e3a-4967-a0b7-2f4b85a90487-logs\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.213687 4967 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.213700 4967 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/092d2168-5e3a-4967-a0b7-2f4b85a90487-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.260355 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e5ec60e1-6164-417c-bd54-dba2488c532a" (UID: "e5ec60e1-6164-417c-bd54-dba2488c532a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.273383 4967 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.316758 4967 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.317882 4967 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.332406 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "092d2168-5e3a-4967-a0b7-2f4b85a90487" (UID: "092d2168-5e3a-4967-a0b7-2f4b85a90487"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.333335 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7d484d94c7-dntt2"] Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.344726 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-fdv92"] Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.359752 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-9c4e-account-create-kpr2p"] Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.377408 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-657885494d-vrfht"] Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.385487 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "092d2168-5e3a-4967-a0b7-2f4b85a90487" (UID: "092d2168-5e3a-4967-a0b7-2f4b85a90487"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.421050 4967 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.421092 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.425139 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-config-data" (OuterVolumeSpecName: "config-data") pod "e5ec60e1-6164-417c-bd54-dba2488c532a" (UID: "e5ec60e1-6164-417c-bd54-dba2488c532a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.435462 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-config-data" (OuterVolumeSpecName: "config-data") pod "092d2168-5e3a-4967-a0b7-2f4b85a90487" (UID: "092d2168-5e3a-4967-a0b7-2f4b85a90487"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.441936 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e5ec60e1-6164-417c-bd54-dba2488c532a" (UID: "e5ec60e1-6164-417c-bd54-dba2488c532a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.523795 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.523838 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/092d2168-5e3a-4967-a0b7-2f4b85a90487-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.523852 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5ec60e1-6164-417c-bd54-dba2488c532a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:18 crc kubenswrapper[4967]: W1121 15:57:18.554230 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddfc30aad_8b31_4143_a494_fb327041e699.slice/crio-c28a577d429fd166dc8838954fc2242895dcede88655e31c4ef6c49ce02c81b8 WatchSource:0}: Error finding container c28a577d429fd166dc8838954fc2242895dcede88655e31c4ef6c49ce02c81b8: Status 404 returned error can't find the container with id c28a577d429fd166dc8838954fc2242895dcede88655e31c4ef6c49ce02c81b8 Nov 21 15:57:18 crc kubenswrapper[4967]: W1121 15:57:18.565270 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf57422a0_a226_4bd9_8dc7_ebfee76b5745.slice/crio-afb4968ff90b3c3a57d508c9ad44029dd6cd2f36631f68f2fcfc34fb2aeb79d0 WatchSource:0}: Error finding container afb4968ff90b3c3a57d508c9ad44029dd6cd2f36631f68f2fcfc34fb2aeb79d0: Status 404 returned error can't find the container with id afb4968ff90b3c3a57d508c9ad44029dd6cd2f36631f68f2fcfc34fb2aeb79d0 Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.638360 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.927339 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"4ff8059c-8c40-4326-b477-95c43286eb35","Type":"ContainerStarted","Data":"f5f42ef79edaa58a59049810518f8f35162bdd82026f3fff2b2d882778c79d44"} Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.930933 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-657885494d-vrfht" event={"ID":"dfc30aad-8b31-4143-a494-fb327041e699","Type":"ContainerStarted","Data":"c28a577d429fd166dc8838954fc2242895dcede88655e31c4ef6c49ce02c81b8"} Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.932788 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" event={"ID":"b316537c-ac90-4a7b-8cee-ed9cb7199f98","Type":"ContainerStarted","Data":"2562c13354817d0fa6368e222c43df2c762ca08d466429c2d6e8273a55b89711"} Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.933996 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:18 crc kubenswrapper[4967]: I1121 15:57:18.948907 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-9c4e-account-create-kpr2p" event={"ID":"f57422a0-a226-4bd9-8dc7-ebfee76b5745","Type":"ContainerStarted","Data":"afb4968ff90b3c3a57d508c9ad44029dd6cd2f36631f68f2fcfc34fb2aeb79d0"} Nov 21 15:57:18 crc 
kubenswrapper[4967]: I1121 15:57:18.958477 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.384496174 podStartE2EDuration="18.958451571s" podCreationTimestamp="2025-11-21 15:57:00 +0000 UTC" firstStartedPulling="2025-11-21 15:57:01.502193268 +0000 UTC m=+1309.760714276" lastFinishedPulling="2025-11-21 15:57:17.076148665 +0000 UTC m=+1325.334669673" observedRunningTime="2025-11-21 15:57:18.942022036 +0000 UTC m=+1327.200543054" watchObservedRunningTime="2025-11-21 15:57:18.958451571 +0000 UTC m=+1327.216972609" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:18.995957 4967 generic.go:334] "Generic (PLEG): container finished" podID="7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f" containerID="aaf7de92700f13b43c6dda63553c636e3e8a02bb1b2dc4cef73764f54d4ba038" exitCode=0 Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:18.996044 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f","Type":"ContainerDied","Data":"aaf7de92700f13b43c6dda63553c636e3e8a02bb1b2dc4cef73764f54d4ba038"} Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.005636 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-fdv92" event={"ID":"e2e282d2-51b3-46f2-9ce8-faa9ad9fec16","Type":"ContainerStarted","Data":"30fb6d9eccfe264abf77afa038f5acc3d63bdbaef94dee4f66617ff87cc2a36d"} Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.012184 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" event={"ID":"03b645f4-be5e-44ce-b67d-b6e9b1661282","Type":"ContainerStarted","Data":"dabf0fc777157a9015b5a637c9bb93a64e65b260757c07bbe539e29d362e1fc5"} Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.012354 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" podUID="03b645f4-be5e-44ce-b67d-b6e9b1661282" containerName="heat-cfnapi" containerID="cri-o://dabf0fc777157a9015b5a637c9bb93a64e65b260757c07bbe539e29d362e1fc5" gracePeriod=60 Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.012637 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.017190 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7d484d94c7-dntt2" event={"ID":"c53783e2-ed84-49c6-b688-9c6603a6c3b1","Type":"ContainerStarted","Data":"68fe86b4a739481af4db71d759271c3ac7d8209215d43bb5163bbc14df259bc2"} Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.017273 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.017331 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.020200 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" podStartSLOduration=14.020180874 podStartE2EDuration="14.020180874s" podCreationTimestamp="2025-11-21 15:57:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:57:18.988097487 +0000 UTC m=+1327.246618495" watchObservedRunningTime="2025-11-21 15:57:19.020180874 +0000 UTC m=+1327.278701892" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.040673 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" podStartSLOduration=4.291364209 podStartE2EDuration="14.040645395s" podCreationTimestamp="2025-11-21 15:57:05 +0000 UTC" firstStartedPulling="2025-11-21 15:57:07.352624023 +0000 UTC m=+1315.611145041" lastFinishedPulling="2025-11-21 15:57:17.101905229 +0000 UTC m=+1325.360426227" observedRunningTime="2025-11-21 15:57:19.030754979 +0000 UTC m=+1327.289275987" watchObservedRunningTime="2025-11-21 15:57:19.040645395 +0000 UTC m=+1327.299166413" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.104635 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.146462 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.189423 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.209803 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.231719 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 15:57:19 crc kubenswrapper[4967]: E1121 15:57:19.232262 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="092d2168-5e3a-4967-a0b7-2f4b85a90487" containerName="glance-log" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.232274 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="092d2168-5e3a-4967-a0b7-2f4b85a90487" containerName="glance-log" Nov 21 15:57:19 crc kubenswrapper[4967]: E1121 15:57:19.232284 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerName="ceilometer-notification-agent" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.232290 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerName="ceilometer-notification-agent" Nov 21 15:57:19 crc kubenswrapper[4967]: E1121 15:57:19.232345 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="092d2168-5e3a-4967-a0b7-2f4b85a90487" containerName="glance-httpd" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.232353 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="092d2168-5e3a-4967-a0b7-2f4b85a90487" containerName="glance-httpd" Nov 21 15:57:19 crc kubenswrapper[4967]: E1121 15:57:19.232362 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerName="sg-core" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.232368 4967 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerName="sg-core" Nov 21 15:57:19 crc kubenswrapper[4967]: E1121 15:57:19.232388 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerName="proxy-httpd" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.232394 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerName="proxy-httpd" Nov 21 15:57:19 crc kubenswrapper[4967]: E1121 15:57:19.232413 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerName="ceilometer-central-agent" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.232419 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerName="ceilometer-central-agent" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.232656 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="092d2168-5e3a-4967-a0b7-2f4b85a90487" containerName="glance-log" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.232711 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerName="ceilometer-central-agent" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.232728 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerName="sg-core" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.232752 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerName="ceilometer-notification-agent" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.232766 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="092d2168-5e3a-4967-a0b7-2f4b85a90487" containerName="glance-httpd" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.232776 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5ec60e1-6164-417c-bd54-dba2488c532a" containerName="proxy-httpd" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.234064 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.237143 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.238757 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.246289 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.249957 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.254733 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.255045 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.284262 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.310254 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9007259a-cb0c-47a2-8eac-f473b82d4422-log-httpd\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.310390 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/524adcf3-b5fb-468d-8964-f88d58729d57-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.310423 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.310451 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/524adcf3-b5fb-468d-8964-f88d58729d57-config-data\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.310490 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/524adcf3-b5fb-468d-8964-f88d58729d57-scripts\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.310562 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/524adcf3-b5fb-468d-8964-f88d58729d57-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.310613 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/524adcf3-b5fb-468d-8964-f88d58729d57-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.310645 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-scripts\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.310672 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q276s\" (UniqueName: \"kubernetes.io/projected/9007259a-cb0c-47a2-8eac-f473b82d4422-kube-api-access-q276s\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.310749 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/524adcf3-b5fb-468d-8964-f88d58729d57-logs\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.310850 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmw5s\" (UniqueName: \"kubernetes.io/projected/524adcf3-b5fb-468d-8964-f88d58729d57-kube-api-access-kmw5s\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.310940 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9007259a-cb0c-47a2-8eac-f473b82d4422-run-httpd\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.310984 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.311513 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-config-data\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.311584 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.324727 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.330634 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.406272 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-wrdfr"] Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.413186 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-combined-ca-bundle\") pod \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.413383 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-httpd-run\") pod \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.413452 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hk86t\" (UniqueName: \"kubernetes.io/projected/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-kube-api-access-hk86t\") pod \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.413476 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-config-data\") pod \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.413602 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-internal-tls-certs\") pod \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.413657 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.413711 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-scripts\") pod \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.413791 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-logs\") pod \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\" (UID: \"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f\") " Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.414150 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q276s\" (UniqueName: \"kubernetes.io/projected/9007259a-cb0c-47a2-8eac-f473b82d4422-kube-api-access-q276s\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.414202 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/524adcf3-b5fb-468d-8964-f88d58729d57-logs\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.414346 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmw5s\" (UniqueName: \"kubernetes.io/projected/524adcf3-b5fb-468d-8964-f88d58729d57-kube-api-access-kmw5s\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.414477 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9007259a-cb0c-47a2-8eac-f473b82d4422-run-httpd\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.414526 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.414549 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-config-data\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.414599 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.414641 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9007259a-cb0c-47a2-8eac-f473b82d4422-log-httpd\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.414692 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.414720 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/524adcf3-b5fb-468d-8964-f88d58729d57-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.414742 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/524adcf3-b5fb-468d-8964-f88d58729d57-config-data\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.414781 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/524adcf3-b5fb-468d-8964-f88d58729d57-scripts\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0"
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.414832 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/524adcf3-b5fb-468d-8964-f88d58729d57-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0"
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.414878 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/524adcf3-b5fb-468d-8964-f88d58729d57-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0"
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.414901 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-scripts\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0"
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.415913 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9007259a-cb0c-47a2-8eac-f473b82d4422-run-httpd\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0"
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.416345 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f" (UID: "7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.425951 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-cf9748ff4-bql4m"]
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.430413 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/524adcf3-b5fb-468d-8964-f88d58729d57-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0"
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.431900 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-scripts\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0"
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.432590 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-external-api-0"
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.433005 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-logs" (OuterVolumeSpecName: "logs") pod "7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f" (UID: "7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.435364 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/524adcf3-b5fb-468d-8964-f88d58729d57-logs\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0"
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.438682 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-xlhnx"]
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.439582 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/524adcf3-b5fb-468d-8964-f88d58729d57-scripts\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0"
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.440456 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9007259a-cb0c-47a2-8eac-f473b82d4422-log-httpd\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0"
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.452845 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-7b9f-account-create-ccd9f"]
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.463553 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-kube-api-access-hk86t" (OuterVolumeSpecName: "kube-api-access-hk86t") pod "7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f" (UID: "7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f"). InnerVolumeSpecName "kube-api-access-hk86t". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.464034 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.464324 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmw5s\" (UniqueName: \"kubernetes.io/projected/524adcf3-b5fb-468d-8964-f88d58729d57-kube-api-access-kmw5s\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.464748 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-2dfc-account-create-c7vxv"] Nov 21 15:57:19 crc kubenswrapper[4967]: W1121 15:57:19.470265 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8fd0d7f_cf04_4fef_8825_4a5f82a76e22.slice/crio-b4ec70b0821064965d10d899521c382ea05f475f2b61d1a572b7a01d23c7d68c WatchSource:0}: Error finding container b4ec70b0821064965d10d899521c382ea05f475f2b61d1a572b7a01d23c7d68c: Status 404 returned error can't find the container with id b4ec70b0821064965d10d899521c382ea05f475f2b61d1a572b7a01d23c7d68c Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.471297 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/524adcf3-b5fb-468d-8964-f88d58729d57-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.471484 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-scripts" (OuterVolumeSpecName: "scripts") pod "7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f" (UID: "7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.472252 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/524adcf3-b5fb-468d-8964-f88d58729d57-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.476464 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "glance") pod "7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f" (UID: "7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f"). InnerVolumeSpecName "local-storage08-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.482150 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.485141 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/524adcf3-b5fb-468d-8964-f88d58729d57-config-data\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.492848 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-config-data\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.495250 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-9b44d55dc-pdsms"] Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.508224 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q276s\" (UniqueName: \"kubernetes.io/projected/9007259a-cb0c-47a2-8eac-f473b82d4422-kube-api-access-q276s\") pod \"ceilometer-0\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " pod="openstack/ceilometer-0" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.518775 4967 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.518981 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.519009 4967 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-logs\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.519021 4967 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.519032 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hk86t\" (UniqueName: \"kubernetes.io/projected/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-kube-api-access-hk86t\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.522475 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-f74c7fcfc-6jdr9"] Nov 21 15:57:19 crc kubenswrapper[4967]: W1121 15:57:19.571999 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6485dc15_e7ba_4abd_9a28_73b7ea9aa4b4.slice/crio-542636f858f2c65741052ea66bf9657766f5310d7fbfbe2823f622170e6d1307 WatchSource:0}: Error finding container 542636f858f2c65741052ea66bf9657766f5310d7fbfbe2823f622170e6d1307: Status 404 returned error can't 
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.609642 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.610182 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.616153 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret"
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.841444 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"524adcf3-b5fb-468d-8964-f88d58729d57\") " pod="openstack/glance-default-external-api-0"
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.855810 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f" (UID: "7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.852249 4967 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc"
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.885446 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.933983 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f" (UID: "7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.938359 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.938385 4967 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.938397 4967 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\""
Nov 21 15:57:19 crc kubenswrapper[4967]: I1121 15:57:19.971043 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-config-data" (OuterVolumeSpecName: "config-data") pod "7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f" (UID: "7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.040061 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f-config-data\") on node \"crc\" DevicePath \"\""
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.047436 4967 generic.go:334] "Generic (PLEG): container finished" podID="03b645f4-be5e-44ce-b67d-b6e9b1661282" containerID="dabf0fc777157a9015b5a637c9bb93a64e65b260757c07bbe539e29d362e1fc5" exitCode=0
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.047522 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" event={"ID":"03b645f4-be5e-44ce-b67d-b6e9b1661282","Type":"ContainerDied","Data":"dabf0fc777157a9015b5a637c9bb93a64e65b260757c07bbe539e29d362e1fc5"}
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.060258 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-xlhnx" event={"ID":"22f15a48-c17e-477d-90d1-ea57d28f1457","Type":"ContainerStarted","Data":"2e56fb6810b2286abc8dfb93a07f4fdfb0e94fd0e9034e0262abb4f418207146"}
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.064755 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.073378 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-65548fddc5-76rgx" event={"ID":"89017806-bb3a-4c00-b40c-5f600c61ecff","Type":"ContainerStarted","Data":"4e0bfc97a856884e79374721220be718f212cf77f99db97d941188c339190546"}
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.073449 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-65548fddc5-76rgx" podUID="89017806-bb3a-4c00-b40c-5f600c61ecff" containerName="heat-api" containerID="cri-o://4e0bfc97a856884e79374721220be718f212cf77f99db97d941188c339190546" gracePeriod=60
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.073644 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-65548fddc5-76rgx"
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.112412 4967 generic.go:334] "Generic (PLEG): container finished" podID="dfc30aad-8b31-4143-a494-fb327041e699" containerID="a7a0894383e5c8f4d461746283f948152266e77ef62993113c825d6946c66c0d" exitCode=1
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.112514 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-657885494d-vrfht" event={"ID":"dfc30aad-8b31-4143-a494-fb327041e699","Type":"ContainerDied","Data":"a7a0894383e5c8f4d461746283f948152266e77ef62993113c825d6946c66c0d"}
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.113200 4967 scope.go:117] "RemoveContainer" containerID="a7a0894383e5c8f4d461746283f948152266e77ef62993113c825d6946c66c0d"
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.116706 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-9b44d55dc-pdsms" event={"ID":"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4","Type":"ContainerStarted","Data":"542636f858f2c65741052ea66bf9657766f5310d7fbfbe2823f622170e6d1307"}
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.122470 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-f74c7fcfc-6jdr9" event={"ID":"fba7e61c-eb55-4772-9904-7e6ae77ec941","Type":"ContainerStarted","Data":"ed942c1a50b45424d039da2cfd87491ab74a847ae9e9ba6e30c19b7702c07651"}
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.123411 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-7b9f-account-create-ccd9f" event={"ID":"7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761","Type":"ContainerStarted","Data":"61cabedcda0d54f67ae3ba07644ee91a49d041ea7caeff6f8ead24a4570b67d5"}
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.133715 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-65548fddc5-76rgx" podStartSLOduration=4.267245433 podStartE2EDuration="15.133699545s" podCreationTimestamp="2025-11-21 15:57:05 +0000 UTC" firstStartedPulling="2025-11-21 15:57:07.767930199 +0000 UTC m=+1316.026451207" lastFinishedPulling="2025-11-21 15:57:18.634384311 +0000 UTC m=+1326.892905319" observedRunningTime="2025-11-21 15:57:20.100381043 +0000 UTC m=+1328.358902051" watchObservedRunningTime="2025-11-21 15:57:20.133699545 +0000 UTC m=+1328.392220553"
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.142933 4967 generic.go:334] "Generic (PLEG): container finished" podID="f57422a0-a226-4bd9-8dc7-ebfee76b5745" containerID="962abd56387290c95f8600b8a5b53ef4285dcb8d4ef098aaa3a5d5a53b533f65" exitCode=0
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.143397 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-9c4e-account-create-kpr2p" event={"ID":"f57422a0-a226-4bd9-8dc7-ebfee76b5745","Type":"ContainerDied","Data":"962abd56387290c95f8600b8a5b53ef4285dcb8d4ef098aaa3a5d5a53b533f65"}
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.155031 4967 generic.go:334] "Generic (PLEG): container finished" podID="e2e282d2-51b3-46f2-9ce8-faa9ad9fec16" containerID="4f34fa2b5e434b0b6be9fe203444c2a143aae4ff35e08de7ea19c220337413f5" exitCode=0
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.155085 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-fdv92" event={"ID":"e2e282d2-51b3-46f2-9ce8-faa9ad9fec16","Type":"ContainerDied","Data":"4f34fa2b5e434b0b6be9fe203444c2a143aae4ff35e08de7ea19c220337413f5"}
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.161342 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7d484d94c7-dntt2" event={"ID":"c53783e2-ed84-49c6-b688-9c6603a6c3b1","Type":"ContainerStarted","Data":"38a92be94a6966320e3547c6cbaffa992d053ccbe6f193300d9dceb07407aad2"}
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.162225 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-7d484d94c7-dntt2"
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.166721 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-cf9748ff4-bql4m" event={"ID":"93184620-a042-499a-bb5b-3d8719a73436","Type":"ContainerStarted","Data":"6bf6064d3b17657fd30d72a117968f00910f9e96a01e3bd7c188e62f25fc254d"}
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.191604 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-2dfc-account-create-c7vxv" event={"ID":"f8fd0d7f-cf04-4fef-8825-4a5f82a76e22","Type":"ContainerStarted","Data":"b4ec70b0821064965d10d899521c382ea05f475f2b61d1a572b7a01d23c7d68c"}
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.232724 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.243087 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f","Type":"ContainerDied","Data":"9b1bd8e379286f1052ec9cfaf2e14f8f29b3433246e42c0fdd54aed222356459"}
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.243140 4967 scope.go:117] "RemoveContainer" containerID="aaf7de92700f13b43c6dda63553c636e3e8a02bb1b2dc4cef73764f54d4ba038"
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.257598 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-wrdfr" event={"ID":"8fe6e979-4546-4f1f-8c36-d57c3bb578bf","Type":"ContainerStarted","Data":"bb7e2c520c044d955209f2b6adbb6d18b9d375cfd59afc241d5d101acf86a296"}
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.265349 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-7d484d94c7-dntt2" podStartSLOduration=6.265325137 podStartE2EDuration="6.265325137s" podCreationTimestamp="2025-11-21 15:57:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:57:20.230171741 +0000 UTC m=+1328.488692749" watchObservedRunningTime="2025-11-21 15:57:20.265325137 +0000 UTC m=+1328.523846155"
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.459661 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.555836 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="092d2168-5e3a-4967-a0b7-2f4b85a90487" path="/var/lib/kubelet/pods/092d2168-5e3a-4967-a0b7-2f4b85a90487/volumes"
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.557361 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5ec60e1-6164-417c-bd54-dba2488c532a" path="/var/lib/kubelet/pods/e5ec60e1-6164-417c-bd54-dba2488c532a/volumes"
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.685446 4967 scope.go:117] "RemoveContainer" containerID="b2a9008ab5ab53fda96ffa0e2ded04125689e319f061812d7e5bd689a69dbf4a"
Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.818115 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-54db5b96bc-8xjk7"
Need to start a new one" pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.863566 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.968238 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-swqwl\" (UniqueName: \"kubernetes.io/projected/03b645f4-be5e-44ce-b67d-b6e9b1661282-kube-api-access-swqwl\") pod \"03b645f4-be5e-44ce-b67d-b6e9b1661282\" (UID: \"03b645f4-be5e-44ce-b67d-b6e9b1661282\") " Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.968394 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03b645f4-be5e-44ce-b67d-b6e9b1661282-combined-ca-bundle\") pod \"03b645f4-be5e-44ce-b67d-b6e9b1661282\" (UID: \"03b645f4-be5e-44ce-b67d-b6e9b1661282\") " Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.968630 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03b645f4-be5e-44ce-b67d-b6e9b1661282-config-data\") pod \"03b645f4-be5e-44ce-b67d-b6e9b1661282\" (UID: \"03b645f4-be5e-44ce-b67d-b6e9b1661282\") " Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.968815 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03b645f4-be5e-44ce-b67d-b6e9b1661282-config-data-custom\") pod \"03b645f4-be5e-44ce-b67d-b6e9b1661282\" (UID: \"03b645f4-be5e-44ce-b67d-b6e9b1661282\") " Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.976356 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03b645f4-be5e-44ce-b67d-b6e9b1661282-kube-api-access-swqwl" (OuterVolumeSpecName: "kube-api-access-swqwl") pod "03b645f4-be5e-44ce-b67d-b6e9b1661282" (UID: "03b645f4-be5e-44ce-b67d-b6e9b1661282"). InnerVolumeSpecName "kube-api-access-swqwl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:57:20 crc kubenswrapper[4967]: I1121 15:57:20.985045 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03b645f4-be5e-44ce-b67d-b6e9b1661282-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "03b645f4-be5e-44ce-b67d-b6e9b1661282" (UID: "03b645f4-be5e-44ce-b67d-b6e9b1661282"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:21 crc kubenswrapper[4967]: I1121 15:57:21.071473 4967 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03b645f4-be5e-44ce-b67d-b6e9b1661282-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:21 crc kubenswrapper[4967]: I1121 15:57:21.071509 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-swqwl\" (UniqueName: \"kubernetes.io/projected/03b645f4-be5e-44ce-b67d-b6e9b1661282-kube-api-access-swqwl\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:21 crc kubenswrapper[4967]: I1121 15:57:21.287739 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9007259a-cb0c-47a2-8eac-f473b82d4422","Type":"ContainerStarted","Data":"6adaf537fed5167d0086a4a94b90a55d30efb02448151c8ca29fb1f81e8f0da7"} Nov 21 15:57:21 crc kubenswrapper[4967]: I1121 15:57:21.293401 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" event={"ID":"03b645f4-be5e-44ce-b67d-b6e9b1661282","Type":"ContainerDied","Data":"ab577b485bed293a8dce7f728edeb473109fc742845302cac1ce47f22ac10804"} Nov 21 15:57:21 crc kubenswrapper[4967]: I1121 15:57:21.293690 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-54db5b96bc-8xjk7" Nov 21 15:57:21 crc kubenswrapper[4967]: I1121 15:57:21.294618 4967 scope.go:117] "RemoveContainer" containerID="dabf0fc777157a9015b5a637c9bb93a64e65b260757c07bbe539e29d362e1fc5" Nov 21 15:57:21 crc kubenswrapper[4967]: I1121 15:57:21.304572 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"524adcf3-b5fb-468d-8964-f88d58729d57","Type":"ContainerStarted","Data":"8ede1e5f4e084565a9712207fe9adaf24148c2360fb1873bf78811f01fe27b82"} Nov 21 15:57:21 crc kubenswrapper[4967]: I1121 15:57:21.322055 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-xlhnx" podStartSLOduration=11.322036317 podStartE2EDuration="11.322036317s" podCreationTimestamp="2025-11-21 15:57:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:57:21.317060223 +0000 UTC m=+1329.575581231" watchObservedRunningTime="2025-11-21 15:57:21.322036317 +0000 UTC m=+1329.580557325" Nov 21 15:57:21 crc kubenswrapper[4967]: I1121 15:57:21.408752 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03b645f4-be5e-44ce-b67d-b6e9b1661282-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "03b645f4-be5e-44ce-b67d-b6e9b1661282" (UID: "03b645f4-be5e-44ce-b67d-b6e9b1661282"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:21 crc kubenswrapper[4967]: I1121 15:57:21.493399 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03b645f4-be5e-44ce-b67d-b6e9b1661282-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:21 crc kubenswrapper[4967]: I1121 15:57:21.546512 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03b645f4-be5e-44ce-b67d-b6e9b1661282-config-data" (OuterVolumeSpecName: "config-data") pod "03b645f4-be5e-44ce-b67d-b6e9b1661282" (UID: "03b645f4-be5e-44ce-b67d-b6e9b1661282"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:21 crc kubenswrapper[4967]: I1121 15:57:21.603655 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03b645f4-be5e-44ce-b67d-b6e9b1661282-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:21 crc kubenswrapper[4967]: I1121 15:57:21.669721 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-54db5b96bc-8xjk7"] Nov 21 15:57:21 crc kubenswrapper[4967]: I1121 15:57:21.691005 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-54db5b96bc-8xjk7"] Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.326709 4967 generic.go:334] "Generic (PLEG): container finished" podID="22f15a48-c17e-477d-90d1-ea57d28f1457" containerID="64eff180864c1f037e56b88c4f75e3c4ed44a1ec22455367107adc5bb31de9db" exitCode=0 Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.327354 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-xlhnx" event={"ID":"22f15a48-c17e-477d-90d1-ea57d28f1457","Type":"ContainerDied","Data":"64eff180864c1f037e56b88c4f75e3c4ed44a1ec22455367107adc5bb31de9db"} Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.329767 4967 generic.go:334] "Generic (PLEG): container finished" podID="8fe6e979-4546-4f1f-8c36-d57c3bb578bf" containerID="474bed44826e965d976c3d9e8b0d2b739ca8ae2309504f4c69a8d78eaf00d9bf" exitCode=0 Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.329818 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-wrdfr" event={"ID":"8fe6e979-4546-4f1f-8c36-d57c3bb578bf","Type":"ContainerDied","Data":"474bed44826e965d976c3d9e8b0d2b739ca8ae2309504f4c69a8d78eaf00d9bf"} Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.333870 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"524adcf3-b5fb-468d-8964-f88d58729d57","Type":"ContainerStarted","Data":"629761ffae445bacfc2559a90c10d2ba90f2a5a43393a17476f2d587fcc7df94"} Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.339091 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9007259a-cb0c-47a2-8eac-f473b82d4422","Type":"ContainerStarted","Data":"4b912915211357dc6ae2523ac847e8c974f536d20ec92b6d83bca803bc4da52f"} Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.342841 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-9c4e-account-create-kpr2p" event={"ID":"f57422a0-a226-4bd9-8dc7-ebfee76b5745","Type":"ContainerDied","Data":"afb4968ff90b3c3a57d508c9ad44029dd6cd2f36631f68f2fcfc34fb2aeb79d0"} Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.342877 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="afb4968ff90b3c3a57d508c9ad44029dd6cd2f36631f68f2fcfc34fb2aeb79d0" Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.342996 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-fdv92" Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.351650 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-cf9748ff4-bql4m" event={"ID":"93184620-a042-499a-bb5b-3d8719a73436","Type":"ContainerStarted","Data":"550914ea0a9dc6c98be7d2f2ca1fe49f9deae5a24055e2d281f34756d6c47a2b"} Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.352727 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-cf9748ff4-bql4m" Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.355802 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-9c4e-account-create-kpr2p" Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.361443 4967 generic.go:334] "Generic (PLEG): container finished" podID="f8fd0d7f-cf04-4fef-8825-4a5f82a76e22" containerID="86d40ebd8df9f146aee1086f634b627cd8746a19100c5cbe08b74e88ed893255" exitCode=0 Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.361529 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-2dfc-account-create-c7vxv" event={"ID":"f8fd0d7f-cf04-4fef-8825-4a5f82a76e22","Type":"ContainerDied","Data":"86d40ebd8df9f146aee1086f634b627cd8746a19100c5cbe08b74e88ed893255"} Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.363756 4967 generic.go:334] "Generic (PLEG): container finished" podID="7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761" containerID="7bfd2aa0456954b687486a48f4e829eca9f8e1ccc619b50708eaa953365eb14f" exitCode=0 Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.363820 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-7b9f-account-create-ccd9f" event={"ID":"7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761","Type":"ContainerDied","Data":"7bfd2aa0456954b687486a48f4e829eca9f8e1ccc619b50708eaa953365eb14f"} Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.368963 4967 generic.go:334] "Generic (PLEG): container finished" podID="dfc30aad-8b31-4143-a494-fb327041e699" containerID="774ff6deec0005a457c04abf66193b29057ba358e72dfd339b65a1f3f5a1b618" exitCode=1 Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.369040 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-657885494d-vrfht" event={"ID":"dfc30aad-8b31-4143-a494-fb327041e699","Type":"ContainerDied","Data":"774ff6deec0005a457c04abf66193b29057ba358e72dfd339b65a1f3f5a1b618"} Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.369080 4967 scope.go:117] "RemoveContainer" containerID="a7a0894383e5c8f4d461746283f948152266e77ef62993113c825d6946c66c0d" Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.369985 4967 scope.go:117] "RemoveContainer" containerID="774ff6deec0005a457c04abf66193b29057ba358e72dfd339b65a1f3f5a1b618" Nov 21 15:57:22 crc kubenswrapper[4967]: E1121 15:57:22.370299 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-657885494d-vrfht_openstack(dfc30aad-8b31-4143-a494-fb327041e699)\"" pod="openstack/heat-cfnapi-657885494d-vrfht" podUID="dfc30aad-8b31-4143-a494-fb327041e699" Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.384911 4967 generic.go:334] "Generic (PLEG): container finished" podID="6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4" containerID="b6ac88c0212fb950f8cc6bd899cd66204e4961a7955948988fe1769cee8591fd" exitCode=1 Nov 21 15:57:22 crc kubenswrapper[4967]: 
Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.385028 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-9b44d55dc-pdsms" event={"ID":"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4","Type":"ContainerDied","Data":"b6ac88c0212fb950f8cc6bd899cd66204e4961a7955948988fe1769cee8591fd"}
Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.387588 4967 scope.go:117] "RemoveContainer" containerID="b6ac88c0212fb950f8cc6bd899cd66204e4961a7955948988fe1769cee8591fd"
Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.388017 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-fdv92" event={"ID":"e2e282d2-51b3-46f2-9ce8-faa9ad9fec16","Type":"ContainerDied","Data":"30fb6d9eccfe264abf77afa038f5acc3d63bdbaef94dee4f66617ff87cc2a36d"}
Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.388061 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="30fb6d9eccfe264abf77afa038f5acc3d63bdbaef94dee4f66617ff87cc2a36d"
Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.388123 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-fdv92"
Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.392767 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-f74c7fcfc-6jdr9" event={"ID":"fba7e61c-eb55-4772-9904-7e6ae77ec941","Type":"ContainerStarted","Data":"e7a50f5b93a733956a7277975bb528d0b22dd89e5d74ffafc51a311fbe5d541a"}
Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.392865 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-f74c7fcfc-6jdr9"
Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.395994 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-cf9748ff4-bql4m" podStartSLOduration=9.395979336 podStartE2EDuration="9.395979336s" podCreationTimestamp="2025-11-21 15:57:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:57:22.380577581 +0000 UTC m=+1330.639098589" watchObservedRunningTime="2025-11-21 15:57:22.395979336 +0000 UTC m=+1330.654500344"
Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.475501 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sr2l8\" (UniqueName: \"kubernetes.io/projected/e2e282d2-51b3-46f2-9ce8-faa9ad9fec16-kube-api-access-sr2l8\") pod \"e2e282d2-51b3-46f2-9ce8-faa9ad9fec16\" (UID: \"e2e282d2-51b3-46f2-9ce8-faa9ad9fec16\") "
Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.475627 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2e282d2-51b3-46f2-9ce8-faa9ad9fec16-operator-scripts\") pod \"e2e282d2-51b3-46f2-9ce8-faa9ad9fec16\" (UID: \"e2e282d2-51b3-46f2-9ce8-faa9ad9fec16\") "
Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.475646 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2zjnp\" (UniqueName: \"kubernetes.io/projected/f57422a0-a226-4bd9-8dc7-ebfee76b5745-kube-api-access-2zjnp\") pod \"f57422a0-a226-4bd9-8dc7-ebfee76b5745\" (UID: \"f57422a0-a226-4bd9-8dc7-ebfee76b5745\") "
Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.475883 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f57422a0-a226-4bd9-8dc7-ebfee76b5745-operator-scripts\") pod \"f57422a0-a226-4bd9-8dc7-ebfee76b5745\" (UID: \"f57422a0-a226-4bd9-8dc7-ebfee76b5745\") "
Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.477444 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2e282d2-51b3-46f2-9ce8-faa9ad9fec16-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e2e282d2-51b3-46f2-9ce8-faa9ad9fec16" (UID: "e2e282d2-51b3-46f2-9ce8-faa9ad9fec16"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.478186 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f57422a0-a226-4bd9-8dc7-ebfee76b5745-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f57422a0-a226-4bd9-8dc7-ebfee76b5745" (UID: "f57422a0-a226-4bd9-8dc7-ebfee76b5745"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.481921 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f57422a0-a226-4bd9-8dc7-ebfee76b5745-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.481948 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e2e282d2-51b3-46f2-9ce8-faa9ad9fec16-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.490869 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2e282d2-51b3-46f2-9ce8-faa9ad9fec16-kube-api-access-sr2l8" (OuterVolumeSpecName: "kube-api-access-sr2l8") pod "e2e282d2-51b3-46f2-9ce8-faa9ad9fec16" (UID: "e2e282d2-51b3-46f2-9ce8-faa9ad9fec16"). InnerVolumeSpecName "kube-api-access-sr2l8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.492282 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f57422a0-a226-4bd9-8dc7-ebfee76b5745-kube-api-access-2zjnp" (OuterVolumeSpecName: "kube-api-access-2zjnp") pod "f57422a0-a226-4bd9-8dc7-ebfee76b5745" (UID: "f57422a0-a226-4bd9-8dc7-ebfee76b5745"). InnerVolumeSpecName "kube-api-access-2zjnp". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.561775 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03b645f4-be5e-44ce-b67d-b6e9b1661282" path="/var/lib/kubelet/pods/03b645f4-be5e-44ce-b67d-b6e9b1661282/volumes" Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.571808 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-f74c7fcfc-6jdr9" podStartSLOduration=8.571789334 podStartE2EDuration="8.571789334s" podCreationTimestamp="2025-11-21 15:57:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:57:22.5127959 +0000 UTC m=+1330.771316918" watchObservedRunningTime="2025-11-21 15:57:22.571789334 +0000 UTC m=+1330.830310342" Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.586340 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sr2l8\" (UniqueName: \"kubernetes.io/projected/e2e282d2-51b3-46f2-9ce8-faa9ad9fec16-kube-api-access-sr2l8\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:22 crc kubenswrapper[4967]: I1121 15:57:22.586379 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2zjnp\" (UniqueName: \"kubernetes.io/projected/f57422a0-a226-4bd9-8dc7-ebfee76b5745-kube-api-access-2zjnp\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:23 crc kubenswrapper[4967]: I1121 15:57:23.434918 4967 scope.go:117] "RemoveContainer" containerID="774ff6deec0005a457c04abf66193b29057ba358e72dfd339b65a1f3f5a1b618" Nov 21 15:57:23 crc kubenswrapper[4967]: E1121 15:57:23.435817 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-657885494d-vrfht_openstack(dfc30aad-8b31-4143-a494-fb327041e699)\"" pod="openstack/heat-cfnapi-657885494d-vrfht" podUID="dfc30aad-8b31-4143-a494-fb327041e699" Nov 21 15:57:23 crc kubenswrapper[4967]: I1121 15:57:23.457418 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"524adcf3-b5fb-468d-8964-f88d58729d57","Type":"ContainerStarted","Data":"a69a968c8b13e542b82610bef617f178eb2b04ec1ba193e09959af5a92c19e52"} Nov 21 15:57:23 crc kubenswrapper[4967]: I1121 15:57:23.473086 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9007259a-cb0c-47a2-8eac-f473b82d4422","Type":"ContainerStarted","Data":"b764bcf3115b4a314b250266927c14a33925a2d1a33ced19c9871713cdb99141"} Nov 21 15:57:23 crc kubenswrapper[4967]: I1121 15:57:23.475798 4967 generic.go:334] "Generic (PLEG): container finished" podID="6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4" containerID="961522eccfc4e9d4e91fcbe2de3d89286da43ff683414e07f83ddfc024295b66" exitCode=1 Nov 21 15:57:23 crc kubenswrapper[4967]: I1121 15:57:23.476451 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-9b44d55dc-pdsms" event={"ID":"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4","Type":"ContainerDied","Data":"961522eccfc4e9d4e91fcbe2de3d89286da43ff683414e07f83ddfc024295b66"} Nov 21 15:57:23 crc kubenswrapper[4967]: I1121 15:57:23.476523 4967 scope.go:117] "RemoveContainer" containerID="b6ac88c0212fb950f8cc6bd899cd66204e4961a7955948988fe1769cee8591fd" Nov 21 15:57:23 crc kubenswrapper[4967]: I1121 15:57:23.476660 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-9c4e-account-create-kpr2p" Nov 21 15:57:23 crc kubenswrapper[4967]: I1121 15:57:23.476675 4967 scope.go:117] "RemoveContainer" containerID="961522eccfc4e9d4e91fcbe2de3d89286da43ff683414e07f83ddfc024295b66" Nov 21 15:57:23 crc kubenswrapper[4967]: E1121 15:57:23.476927 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-9b44d55dc-pdsms_openstack(6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4)\"" pod="openstack/heat-api-9b44d55dc-pdsms" podUID="6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4" Nov 21 15:57:23 crc kubenswrapper[4967]: I1121 15:57:23.555482 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.555456005 podStartE2EDuration="4.555456005s" podCreationTimestamp="2025-11-21 15:57:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:57:23.511366131 +0000 UTC m=+1331.769887149" watchObservedRunningTime="2025-11-21 15:57:23.555456005 +0000 UTC m=+1331.813977013" Nov 21 15:57:23 crc kubenswrapper[4967]: I1121 15:57:23.740486 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-9b44d55dc-pdsms" Nov 21 15:57:23 crc kubenswrapper[4967]: I1121 15:57:23.740780 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-9b44d55dc-pdsms" Nov 21 15:57:23 crc kubenswrapper[4967]: I1121 15:57:23.766480 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-657885494d-vrfht" Nov 21 15:57:23 crc kubenswrapper[4967]: I1121 15:57:23.766740 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-657885494d-vrfht" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.138490 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-2dfc-account-create-c7vxv" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.252347 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8fd0d7f-cf04-4fef-8825-4a5f82a76e22-operator-scripts\") pod \"f8fd0d7f-cf04-4fef-8825-4a5f82a76e22\" (UID: \"f8fd0d7f-cf04-4fef-8825-4a5f82a76e22\") " Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.252712 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-42d85\" (UniqueName: \"kubernetes.io/projected/f8fd0d7f-cf04-4fef-8825-4a5f82a76e22-kube-api-access-42d85\") pod \"f8fd0d7f-cf04-4fef-8825-4a5f82a76e22\" (UID: \"f8fd0d7f-cf04-4fef-8825-4a5f82a76e22\") " Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.254127 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8fd0d7f-cf04-4fef-8825-4a5f82a76e22-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f8fd0d7f-cf04-4fef-8825-4a5f82a76e22" (UID: "f8fd0d7f-cf04-4fef-8825-4a5f82a76e22"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.263658 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8fd0d7f-cf04-4fef-8825-4a5f82a76e22-kube-api-access-42d85" (OuterVolumeSpecName: "kube-api-access-42d85") pod "f8fd0d7f-cf04-4fef-8825-4a5f82a76e22" (UID: "f8fd0d7f-cf04-4fef-8825-4a5f82a76e22"). InnerVolumeSpecName "kube-api-access-42d85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.355844 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8fd0d7f-cf04-4fef-8825-4a5f82a76e22-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.355887 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-42d85\" (UniqueName: \"kubernetes.io/projected/f8fd0d7f-cf04-4fef-8825-4a5f82a76e22-kube-api-access-42d85\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.494471 4967 scope.go:117] "RemoveContainer" containerID="961522eccfc4e9d4e91fcbe2de3d89286da43ff683414e07f83ddfc024295b66" Nov 21 15:57:24 crc kubenswrapper[4967]: E1121 15:57:24.495131 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-9b44d55dc-pdsms_openstack(6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4)\"" pod="openstack/heat-api-9b44d55dc-pdsms" podUID="6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.499611 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-xlhnx" event={"ID":"22f15a48-c17e-477d-90d1-ea57d28f1457","Type":"ContainerDied","Data":"2e56fb6810b2286abc8dfb93a07f4fdfb0e94fd0e9034e0262abb4f418207146"} Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.499661 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e56fb6810b2286abc8dfb93a07f4fdfb0e94fd0e9034e0262abb4f418207146" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.502626 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-wrdfr" event={"ID":"8fe6e979-4546-4f1f-8c36-d57c3bb578bf","Type":"ContainerDied","Data":"bb7e2c520c044d955209f2b6adbb6d18b9d375cfd59afc241d5d101acf86a296"} Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.502692 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bb7e2c520c044d955209f2b6adbb6d18b9d375cfd59afc241d5d101acf86a296" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.506602 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-2dfc-account-create-c7vxv" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.506588 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-2dfc-account-create-c7vxv" event={"ID":"f8fd0d7f-cf04-4fef-8825-4a5f82a76e22","Type":"ContainerDied","Data":"b4ec70b0821064965d10d899521c382ea05f475f2b61d1a572b7a01d23c7d68c"} Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.506669 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b4ec70b0821064965d10d899521c382ea05f475f2b61d1a572b7a01d23c7d68c" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.512554 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-7b9f-account-create-ccd9f" event={"ID":"7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761","Type":"ContainerDied","Data":"61cabedcda0d54f67ae3ba07644ee91a49d041ea7caeff6f8ead24a4570b67d5"} Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.512598 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="61cabedcda0d54f67ae3ba07644ee91a49d041ea7caeff6f8ead24a4570b67d5" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.516855 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9007259a-cb0c-47a2-8eac-f473b82d4422","Type":"ContainerStarted","Data":"f7a1bd11183d346ad6e79bc7a8d754b69b6b041030f59cf7215bcd7f277a9bae"} Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.517819 4967 scope.go:117] "RemoveContainer" containerID="774ff6deec0005a457c04abf66193b29057ba358e72dfd339b65a1f3f5a1b618" Nov 21 15:57:24 crc kubenswrapper[4967]: E1121 15:57:24.518269 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-657885494d-vrfht_openstack(dfc30aad-8b31-4143-a494-fb327041e699)\"" pod="openstack/heat-cfnapi-657885494d-vrfht" podUID="dfc30aad-8b31-4143-a494-fb327041e699" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.550293 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-wrdfr" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.567682 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-7b9f-account-create-ccd9f" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.588966 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-xlhnx" Nov 21 15:57:24 crc kubenswrapper[4967]: E1121 15:57:24.616358 4967 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8fd0d7f_cf04_4fef_8825_4a5f82a76e22.slice\": RecentStats: unable to find data in memory cache]" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.661997 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761-operator-scripts\") pod \"7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761\" (UID: \"7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761\") " Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.662069 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8fe6e979-4546-4f1f-8c36-d57c3bb578bf-operator-scripts\") pod \"8fe6e979-4546-4f1f-8c36-d57c3bb578bf\" (UID: \"8fe6e979-4546-4f1f-8c36-d57c3bb578bf\") " Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.662260 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22f15a48-c17e-477d-90d1-ea57d28f1457-operator-scripts\") pod \"22f15a48-c17e-477d-90d1-ea57d28f1457\" (UID: \"22f15a48-c17e-477d-90d1-ea57d28f1457\") " Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.662291 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k99fh\" (UniqueName: \"kubernetes.io/projected/8fe6e979-4546-4f1f-8c36-d57c3bb578bf-kube-api-access-k99fh\") pod \"8fe6e979-4546-4f1f-8c36-d57c3bb578bf\" (UID: \"8fe6e979-4546-4f1f-8c36-d57c3bb578bf\") " Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.662346 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtqb9\" (UniqueName: \"kubernetes.io/projected/7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761-kube-api-access-gtqb9\") pod \"7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761\" (UID: \"7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761\") " Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.662368 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt67g\" (UniqueName: \"kubernetes.io/projected/22f15a48-c17e-477d-90d1-ea57d28f1457-kube-api-access-vt67g\") pod \"22f15a48-c17e-477d-90d1-ea57d28f1457\" (UID: \"22f15a48-c17e-477d-90d1-ea57d28f1457\") " Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.662727 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761" (UID: "7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.662877 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22f15a48-c17e-477d-90d1-ea57d28f1457-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "22f15a48-c17e-477d-90d1-ea57d28f1457" (UID: "22f15a48-c17e-477d-90d1-ea57d28f1457"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.663243 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.664883 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fe6e979-4546-4f1f-8c36-d57c3bb578bf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8fe6e979-4546-4f1f-8c36-d57c3bb578bf" (UID: "8fe6e979-4546-4f1f-8c36-d57c3bb578bf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.669042 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761-kube-api-access-gtqb9" (OuterVolumeSpecName: "kube-api-access-gtqb9") pod "7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761" (UID: "7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761"). InnerVolumeSpecName "kube-api-access-gtqb9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.669345 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fe6e979-4546-4f1f-8c36-d57c3bb578bf-kube-api-access-k99fh" (OuterVolumeSpecName: "kube-api-access-k99fh") pod "8fe6e979-4546-4f1f-8c36-d57c3bb578bf" (UID: "8fe6e979-4546-4f1f-8c36-d57c3bb578bf"). InnerVolumeSpecName "kube-api-access-k99fh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.672867 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22f15a48-c17e-477d-90d1-ea57d28f1457-kube-api-access-vt67g" (OuterVolumeSpecName: "kube-api-access-vt67g") pod "22f15a48-c17e-477d-90d1-ea57d28f1457" (UID: "22f15a48-c17e-477d-90d1-ea57d28f1457"). InnerVolumeSpecName "kube-api-access-vt67g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.765243 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/22f15a48-c17e-477d-90d1-ea57d28f1457-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.765593 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k99fh\" (UniqueName: \"kubernetes.io/projected/8fe6e979-4546-4f1f-8c36-d57c3bb578bf-kube-api-access-k99fh\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.765609 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtqb9\" (UniqueName: \"kubernetes.io/projected/7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761-kube-api-access-gtqb9\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.765621 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt67g\" (UniqueName: \"kubernetes.io/projected/22f15a48-c17e-477d-90d1-ea57d28f1457-kube-api-access-vt67g\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:24 crc kubenswrapper[4967]: I1121 15:57:24.765637 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8fe6e979-4546-4f1f-8c36-d57c3bb578bf-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:25 crc kubenswrapper[4967]: I1121 15:57:25.526886 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-xlhnx" Nov 21 15:57:25 crc kubenswrapper[4967]: I1121 15:57:25.526965 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-7b9f-account-create-ccd9f" Nov 21 15:57:25 crc kubenswrapper[4967]: I1121 15:57:25.526963 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-wrdfr" Nov 21 15:57:25 crc kubenswrapper[4967]: I1121 15:57:25.527645 4967 scope.go:117] "RemoveContainer" containerID="961522eccfc4e9d4e91fcbe2de3d89286da43ff683414e07f83ddfc024295b66" Nov 21 15:57:25 crc kubenswrapper[4967]: E1121 15:57:25.527907 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-9b44d55dc-pdsms_openstack(6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4)\"" pod="openstack/heat-api-9b44d55dc-pdsms" podUID="6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4" Nov 21 15:57:25 crc kubenswrapper[4967]: I1121 15:57:25.872333 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.082229 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-59bbb4975-mxdmz" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.226359 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-jcvtj"] Nov 21 15:57:26 crc kubenswrapper[4967]: E1121 15:57:26.226941 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22f15a48-c17e-477d-90d1-ea57d28f1457" containerName="mariadb-database-create" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.226962 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="22f15a48-c17e-477d-90d1-ea57d28f1457" containerName="mariadb-database-create" Nov 21 15:57:26 crc kubenswrapper[4967]: E1121 15:57:26.226983 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03b645f4-be5e-44ce-b67d-b6e9b1661282" containerName="heat-cfnapi" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.226992 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="03b645f4-be5e-44ce-b67d-b6e9b1661282" containerName="heat-cfnapi" Nov 21 15:57:26 crc kubenswrapper[4967]: E1121 15:57:26.227014 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761" containerName="mariadb-account-create" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.227023 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761" containerName="mariadb-account-create" Nov 21 15:57:26 crc kubenswrapper[4967]: E1121 15:57:26.227039 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f" containerName="glance-log" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.227047 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f" containerName="glance-log" Nov 21 15:57:26 crc kubenswrapper[4967]: E1121 15:57:26.227061 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fe6e979-4546-4f1f-8c36-d57c3bb578bf" containerName="mariadb-database-create" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.227070 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fe6e979-4546-4f1f-8c36-d57c3bb578bf" containerName="mariadb-database-create" Nov 21 15:57:26 crc kubenswrapper[4967]: E1121 15:57:26.227088 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2e282d2-51b3-46f2-9ce8-faa9ad9fec16" containerName="mariadb-database-create" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.227096 4967 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="e2e282d2-51b3-46f2-9ce8-faa9ad9fec16" containerName="mariadb-database-create" Nov 21 15:57:26 crc kubenswrapper[4967]: E1121 15:57:26.227110 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f" containerName="glance-httpd" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.227117 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f" containerName="glance-httpd" Nov 21 15:57:26 crc kubenswrapper[4967]: E1121 15:57:26.227132 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f57422a0-a226-4bd9-8dc7-ebfee76b5745" containerName="mariadb-account-create" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.227142 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="f57422a0-a226-4bd9-8dc7-ebfee76b5745" containerName="mariadb-account-create" Nov 21 15:57:26 crc kubenswrapper[4967]: E1121 15:57:26.227165 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8fd0d7f-cf04-4fef-8825-4a5f82a76e22" containerName="mariadb-account-create" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.227174 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8fd0d7f-cf04-4fef-8825-4a5f82a76e22" containerName="mariadb-account-create" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.227529 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2e282d2-51b3-46f2-9ce8-faa9ad9fec16" containerName="mariadb-database-create" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.227547 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f" containerName="glance-httpd" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.227563 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fe6e979-4546-4f1f-8c36-d57c3bb578bf" containerName="mariadb-database-create" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.227579 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="03b645f4-be5e-44ce-b67d-b6e9b1661282" containerName="heat-cfnapi" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.227589 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="f57422a0-a226-4bd9-8dc7-ebfee76b5745" containerName="mariadb-account-create" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.227607 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="22f15a48-c17e-477d-90d1-ea57d28f1457" containerName="mariadb-database-create" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.227619 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f" containerName="glance-log" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.227652 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761" containerName="mariadb-account-create" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.227668 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8fd0d7f-cf04-4fef-8825-4a5f82a76e22" containerName="mariadb-account-create" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.228717 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-jcvtj" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.232215 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.232486 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.232635 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-h4mjw" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.245953 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-jcvtj"] Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.318372 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6adf654d-d462-4c77-98c5-33b5a6bd9e44-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-jcvtj\" (UID: \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\") " pod="openstack/nova-cell0-conductor-db-sync-jcvtj" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.318431 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6adf654d-d462-4c77-98c5-33b5a6bd9e44-scripts\") pod \"nova-cell0-conductor-db-sync-jcvtj\" (UID: \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\") " pod="openstack/nova-cell0-conductor-db-sync-jcvtj" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.318522 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d82zs\" (UniqueName: \"kubernetes.io/projected/6adf654d-d462-4c77-98c5-33b5a6bd9e44-kube-api-access-d82zs\") pod \"nova-cell0-conductor-db-sync-jcvtj\" (UID: \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\") " pod="openstack/nova-cell0-conductor-db-sync-jcvtj" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.318666 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6adf654d-d462-4c77-98c5-33b5a6bd9e44-config-data\") pod \"nova-cell0-conductor-db-sync-jcvtj\" (UID: \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\") " pod="openstack/nova-cell0-conductor-db-sync-jcvtj" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.406221 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.423854 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6adf654d-d462-4c77-98c5-33b5a6bd9e44-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-jcvtj\" (UID: \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\") " pod="openstack/nova-cell0-conductor-db-sync-jcvtj" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.423900 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6adf654d-d462-4c77-98c5-33b5a6bd9e44-scripts\") pod \"nova-cell0-conductor-db-sync-jcvtj\" (UID: \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\") " pod="openstack/nova-cell0-conductor-db-sync-jcvtj" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.423939 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-d82zs\" (UniqueName: \"kubernetes.io/projected/6adf654d-d462-4c77-98c5-33b5a6bd9e44-kube-api-access-d82zs\") pod \"nova-cell0-conductor-db-sync-jcvtj\" (UID: \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\") " pod="openstack/nova-cell0-conductor-db-sync-jcvtj" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.424017 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6adf654d-d462-4c77-98c5-33b5a6bd9e44-config-data\") pod \"nova-cell0-conductor-db-sync-jcvtj\" (UID: \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\") " pod="openstack/nova-cell0-conductor-db-sync-jcvtj" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.431236 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6adf654d-d462-4c77-98c5-33b5a6bd9e44-config-data\") pod \"nova-cell0-conductor-db-sync-jcvtj\" (UID: \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\") " pod="openstack/nova-cell0-conductor-db-sync-jcvtj" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.440685 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6adf654d-d462-4c77-98c5-33b5a6bd9e44-scripts\") pod \"nova-cell0-conductor-db-sync-jcvtj\" (UID: \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\") " pod="openstack/nova-cell0-conductor-db-sync-jcvtj" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.454596 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6adf654d-d462-4c77-98c5-33b5a6bd9e44-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-jcvtj\" (UID: \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\") " pod="openstack/nova-cell0-conductor-db-sync-jcvtj" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.460414 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d82zs\" (UniqueName: \"kubernetes.io/projected/6adf654d-d462-4c77-98c5-33b5a6bd9e44-kube-api-access-d82zs\") pod \"nova-cell0-conductor-db-sync-jcvtj\" (UID: \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\") " pod="openstack/nova-cell0-conductor-db-sync-jcvtj" Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.502193 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-n4kt7"] Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.502510 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" podUID="12732ea1-9536-4f66-8a18-14aec233a88a" containerName="dnsmasq-dns" containerID="cri-o://201b55c7f24a83c563f141af40c14947117f902d844980de09d42c970f65b8de" gracePeriod=10 Nov 21 15:57:26 crc kubenswrapper[4967]: I1121 15:57:26.556710 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-jcvtj" Nov 21 15:57:27 crc kubenswrapper[4967]: I1121 15:57:27.186143 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-jcvtj"] Nov 21 15:57:27 crc kubenswrapper[4967]: I1121 15:57:27.579257 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9007259a-cb0c-47a2-8eac-f473b82d4422","Type":"ContainerStarted","Data":"d0d46dfbccb8c7577d72d35bff9ad542f713813525634b6e84df4446e99c4865"} Nov 21 15:57:27 crc kubenswrapper[4967]: I1121 15:57:27.579487 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerName="ceilometer-central-agent" containerID="cri-o://4b912915211357dc6ae2523ac847e8c974f536d20ec92b6d83bca803bc4da52f" gracePeriod=30 Nov 21 15:57:27 crc kubenswrapper[4967]: I1121 15:57:27.579847 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerName="sg-core" containerID="cri-o://f7a1bd11183d346ad6e79bc7a8d754b69b6b041030f59cf7215bcd7f277a9bae" gracePeriod=30 Nov 21 15:57:27 crc kubenswrapper[4967]: I1121 15:57:27.579910 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerName="proxy-httpd" containerID="cri-o://d0d46dfbccb8c7577d72d35bff9ad542f713813525634b6e84df4446e99c4865" gracePeriod=30 Nov 21 15:57:27 crc kubenswrapper[4967]: I1121 15:57:27.579980 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerName="ceilometer-notification-agent" containerID="cri-o://b764bcf3115b4a314b250266927c14a33925a2d1a33ced19c9871713cdb99141" gracePeriod=30 Nov 21 15:57:27 crc kubenswrapper[4967]: I1121 15:57:27.579994 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 15:57:27 crc kubenswrapper[4967]: I1121 15:57:27.587042 4967 generic.go:334] "Generic (PLEG): container finished" podID="12732ea1-9536-4f66-8a18-14aec233a88a" containerID="201b55c7f24a83c563f141af40c14947117f902d844980de09d42c970f65b8de" exitCode=0 Nov 21 15:57:27 crc kubenswrapper[4967]: I1121 15:57:27.587113 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" event={"ID":"12732ea1-9536-4f66-8a18-14aec233a88a","Type":"ContainerDied","Data":"201b55c7f24a83c563f141af40c14947117f902d844980de09d42c970f65b8de"} Nov 21 15:57:27 crc kubenswrapper[4967]: I1121 15:57:27.592810 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-jcvtj" event={"ID":"6adf654d-d462-4c77-98c5-33b5a6bd9e44","Type":"ContainerStarted","Data":"e5bba6d7836b5f74a8a27ff0d9055da911fa00cfdc844cb42a4953f8aca04c7e"} Nov 21 15:57:27 crc kubenswrapper[4967]: I1121 15:57:27.613115 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.638639275 podStartE2EDuration="8.613097001s" podCreationTimestamp="2025-11-21 15:57:19 +0000 UTC" firstStartedPulling="2025-11-21 15:57:20.510833778 +0000 UTC m=+1328.769354786" lastFinishedPulling="2025-11-21 15:57:25.485291504 +0000 UTC m=+1333.743812512" observedRunningTime="2025-11-21 15:57:27.608028445 +0000 UTC m=+1335.866549453" 
watchObservedRunningTime="2025-11-21 15:57:27.613097001 +0000 UTC m=+1335.871618029" Nov 21 15:57:27 crc kubenswrapper[4967]: I1121 15:57:27.949880 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:57:27 crc kubenswrapper[4967]: I1121 15:57:27.970284 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-config\") pod \"12732ea1-9536-4f66-8a18-14aec233a88a\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " Nov 21 15:57:27 crc kubenswrapper[4967]: I1121 15:57:27.970425 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l6tq9\" (UniqueName: \"kubernetes.io/projected/12732ea1-9536-4f66-8a18-14aec233a88a-kube-api-access-l6tq9\") pod \"12732ea1-9536-4f66-8a18-14aec233a88a\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " Nov 21 15:57:27 crc kubenswrapper[4967]: I1121 15:57:27.970524 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-ovsdbserver-sb\") pod \"12732ea1-9536-4f66-8a18-14aec233a88a\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " Nov 21 15:57:27 crc kubenswrapper[4967]: I1121 15:57:27.970565 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-dns-swift-storage-0\") pod \"12732ea1-9536-4f66-8a18-14aec233a88a\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " Nov 21 15:57:27 crc kubenswrapper[4967]: I1121 15:57:27.970624 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-dns-svc\") pod \"12732ea1-9536-4f66-8a18-14aec233a88a\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " Nov 21 15:57:27 crc kubenswrapper[4967]: I1121 15:57:27.970645 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-ovsdbserver-nb\") pod \"12732ea1-9536-4f66-8a18-14aec233a88a\" (UID: \"12732ea1-9536-4f66-8a18-14aec233a88a\") " Nov 21 15:57:27 crc kubenswrapper[4967]: I1121 15:57:27.990707 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12732ea1-9536-4f66-8a18-14aec233a88a-kube-api-access-l6tq9" (OuterVolumeSpecName: "kube-api-access-l6tq9") pod "12732ea1-9536-4f66-8a18-14aec233a88a" (UID: "12732ea1-9536-4f66-8a18-14aec233a88a"). InnerVolumeSpecName "kube-api-access-l6tq9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.072807 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l6tq9\" (UniqueName: \"kubernetes.io/projected/12732ea1-9536-4f66-8a18-14aec233a88a-kube-api-access-l6tq9\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.138109 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "12732ea1-9536-4f66-8a18-14aec233a88a" (UID: "12732ea1-9536-4f66-8a18-14aec233a88a"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.145279 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "12732ea1-9536-4f66-8a18-14aec233a88a" (UID: "12732ea1-9536-4f66-8a18-14aec233a88a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.163129 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-config" (OuterVolumeSpecName: "config") pod "12732ea1-9536-4f66-8a18-14aec233a88a" (UID: "12732ea1-9536-4f66-8a18-14aec233a88a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.169395 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "12732ea1-9536-4f66-8a18-14aec233a88a" (UID: "12732ea1-9536-4f66-8a18-14aec233a88a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.171654 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "12732ea1-9536-4f66-8a18-14aec233a88a" (UID: "12732ea1-9536-4f66-8a18-14aec233a88a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.174975 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.175090 4967 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.175171 4967 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.175250 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.175373 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12732ea1-9536-4f66-8a18-14aec233a88a-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.192820 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.244122 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-657885494d-vrfht"] Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.377107 4967 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack/neutron-5f8c4b98b5-tmhs4" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.454923 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5859ff54bd-hmqvr"] Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.455576 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5859ff54bd-hmqvr" podUID="7e094f7a-bff7-4c67-92a4-f20a6d05e9ff" containerName="neutron-api" containerID="cri-o://b70af11e3e863da51b383110c505b812605a8d1ed939cad3719295564dd19018" gracePeriod=30 Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.455726 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5859ff54bd-hmqvr" podUID="7e094f7a-bff7-4c67-92a4-f20a6d05e9ff" containerName="neutron-httpd" containerID="cri-o://f025580bf98c93b57599b9e747243fb87b1d585e13ad663372503a7e72922eaf" gracePeriod=30 Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.629650 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" event={"ID":"12732ea1-9536-4f66-8a18-14aec233a88a","Type":"ContainerDied","Data":"e1daaee4164a288422207cf90b306d25e3b9b0ff968b0bb873cb29838f724d5f"} Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.629711 4967 scope.go:117] "RemoveContainer" containerID="201b55c7f24a83c563f141af40c14947117f902d844980de09d42c970f65b8de" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.629856 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-n4kt7" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.665520 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-n4kt7"] Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.671062 4967 generic.go:334] "Generic (PLEG): container finished" podID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerID="f7a1bd11183d346ad6e79bc7a8d754b69b6b041030f59cf7215bcd7f277a9bae" exitCode=2 Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.671100 4967 generic.go:334] "Generic (PLEG): container finished" podID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerID="b764bcf3115b4a314b250266927c14a33925a2d1a33ced19c9871713cdb99141" exitCode=0 Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.671124 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9007259a-cb0c-47a2-8eac-f473b82d4422","Type":"ContainerDied","Data":"f7a1bd11183d346ad6e79bc7a8d754b69b6b041030f59cf7215bcd7f277a9bae"} Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.671156 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9007259a-cb0c-47a2-8eac-f473b82d4422","Type":"ContainerDied","Data":"b764bcf3115b4a314b250266927c14a33925a2d1a33ced19c9871713cdb99141"} Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.680385 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-657885494d-vrfht" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.682056 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-n4kt7"] Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.695612 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfc30aad-8b31-4143-a494-fb327041e699-combined-ca-bundle\") pod \"dfc30aad-8b31-4143-a494-fb327041e699\" (UID: \"dfc30aad-8b31-4143-a494-fb327041e699\") " Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.695779 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfc30aad-8b31-4143-a494-fb327041e699-config-data\") pod \"dfc30aad-8b31-4143-a494-fb327041e699\" (UID: \"dfc30aad-8b31-4143-a494-fb327041e699\") " Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.695824 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dfc30aad-8b31-4143-a494-fb327041e699-config-data-custom\") pod \"dfc30aad-8b31-4143-a494-fb327041e699\" (UID: \"dfc30aad-8b31-4143-a494-fb327041e699\") " Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.695886 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k8lw8\" (UniqueName: \"kubernetes.io/projected/dfc30aad-8b31-4143-a494-fb327041e699-kube-api-access-k8lw8\") pod \"dfc30aad-8b31-4143-a494-fb327041e699\" (UID: \"dfc30aad-8b31-4143-a494-fb327041e699\") " Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.714292 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dfc30aad-8b31-4143-a494-fb327041e699-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "dfc30aad-8b31-4143-a494-fb327041e699" (UID: "dfc30aad-8b31-4143-a494-fb327041e699"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.714438 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dfc30aad-8b31-4143-a494-fb327041e699-kube-api-access-k8lw8" (OuterVolumeSpecName: "kube-api-access-k8lw8") pod "dfc30aad-8b31-4143-a494-fb327041e699" (UID: "dfc30aad-8b31-4143-a494-fb327041e699"). InnerVolumeSpecName "kube-api-access-k8lw8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.749080 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dfc30aad-8b31-4143-a494-fb327041e699-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dfc30aad-8b31-4143-a494-fb327041e699" (UID: "dfc30aad-8b31-4143-a494-fb327041e699"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.773660 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dfc30aad-8b31-4143-a494-fb327041e699-config-data" (OuterVolumeSpecName: "config-data") pod "dfc30aad-8b31-4143-a494-fb327041e699" (UID: "dfc30aad-8b31-4143-a494-fb327041e699"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.798670 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfc30aad-8b31-4143-a494-fb327041e699-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.799459 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfc30aad-8b31-4143-a494-fb327041e699-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.799507 4967 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/dfc30aad-8b31-4143-a494-fb327041e699-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.799519 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k8lw8\" (UniqueName: \"kubernetes.io/projected/dfc30aad-8b31-4143-a494-fb327041e699-kube-api-access-k8lw8\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:28 crc kubenswrapper[4967]: I1121 15:57:28.832575 4967 scope.go:117] "RemoveContainer" containerID="1c26fb594847a70a2ad68de6745a6cfd994ef1dd03d24b73ff20f9fc64b12ba8" Nov 21 15:57:29 crc kubenswrapper[4967]: I1121 15:57:29.036567 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-65548fddc5-76rgx" Nov 21 15:57:29 crc kubenswrapper[4967]: I1121 15:57:29.685421 4967 generic.go:334] "Generic (PLEG): container finished" podID="7e094f7a-bff7-4c67-92a4-f20a6d05e9ff" containerID="f025580bf98c93b57599b9e747243fb87b1d585e13ad663372503a7e72922eaf" exitCode=0 Nov 21 15:57:29 crc kubenswrapper[4967]: I1121 15:57:29.685468 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5859ff54bd-hmqvr" event={"ID":"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff","Type":"ContainerDied","Data":"f025580bf98c93b57599b9e747243fb87b1d585e13ad663372503a7e72922eaf"} Nov 21 15:57:29 crc kubenswrapper[4967]: I1121 15:57:29.686968 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-657885494d-vrfht" event={"ID":"dfc30aad-8b31-4143-a494-fb327041e699","Type":"ContainerDied","Data":"c28a577d429fd166dc8838954fc2242895dcede88655e31c4ef6c49ce02c81b8"} Nov 21 15:57:29 crc kubenswrapper[4967]: I1121 15:57:29.687038 4967 scope.go:117] "RemoveContainer" containerID="774ff6deec0005a457c04abf66193b29057ba358e72dfd339b65a1f3f5a1b618" Nov 21 15:57:29 crc kubenswrapper[4967]: I1121 15:57:29.687063 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-657885494d-vrfht" Nov 21 15:57:29 crc kubenswrapper[4967]: I1121 15:57:29.786397 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-657885494d-vrfht"] Nov 21 15:57:29 crc kubenswrapper[4967]: I1121 15:57:29.796351 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-657885494d-vrfht"] Nov 21 15:57:29 crc kubenswrapper[4967]: I1121 15:57:29.886088 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 21 15:57:29 crc kubenswrapper[4967]: I1121 15:57:29.886155 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 21 15:57:29 crc kubenswrapper[4967]: I1121 15:57:29.950450 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 21 15:57:29 crc kubenswrapper[4967]: I1121 15:57:29.951635 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 21 15:57:30 crc kubenswrapper[4967]: I1121 15:57:30.548075 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12732ea1-9536-4f66-8a18-14aec233a88a" path="/var/lib/kubelet/pods/12732ea1-9536-4f66-8a18-14aec233a88a/volumes" Nov 21 15:57:30 crc kubenswrapper[4967]: I1121 15:57:30.548872 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dfc30aad-8b31-4143-a494-fb327041e699" path="/var/lib/kubelet/pods/dfc30aad-8b31-4143-a494-fb327041e699/volumes" Nov 21 15:57:30 crc kubenswrapper[4967]: I1121 15:57:30.698806 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 21 15:57:30 crc kubenswrapper[4967]: I1121 15:57:30.698847 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 21 15:57:31 crc kubenswrapper[4967]: I1121 15:57:31.558155 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 15:57:31 crc kubenswrapper[4967]: I1121 15:57:31.644184 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-9b44d55dc-pdsms"] Nov 21 15:57:31 crc kubenswrapper[4967]: I1121 15:57:31.719578 4967 generic.go:334] "Generic (PLEG): container finished" podID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerID="4b912915211357dc6ae2523ac847e8c974f536d20ec92b6d83bca803bc4da52f" exitCode=0 Nov 21 15:57:31 crc kubenswrapper[4967]: I1121 15:57:31.720085 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9007259a-cb0c-47a2-8eac-f473b82d4422","Type":"ContainerDied","Data":"4b912915211357dc6ae2523ac847e8c974f536d20ec92b6d83bca803bc4da52f"} Nov 21 15:57:32 crc kubenswrapper[4967]: I1121 15:57:32.133070 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-9b44d55dc-pdsms" Nov 21 15:57:32 crc kubenswrapper[4967]: I1121 15:57:32.288818 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9nhjh\" (UniqueName: \"kubernetes.io/projected/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-kube-api-access-9nhjh\") pod \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\" (UID: \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\") " Nov 21 15:57:32 crc kubenswrapper[4967]: I1121 15:57:32.289017 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-config-data\") pod \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\" (UID: \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\") " Nov 21 15:57:32 crc kubenswrapper[4967]: I1121 15:57:32.289059 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-combined-ca-bundle\") pod \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\" (UID: \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\") " Nov 21 15:57:32 crc kubenswrapper[4967]: I1121 15:57:32.289160 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-config-data-custom\") pod \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\" (UID: \"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4\") " Nov 21 15:57:32 crc kubenswrapper[4967]: I1121 15:57:32.297247 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-kube-api-access-9nhjh" (OuterVolumeSpecName: "kube-api-access-9nhjh") pod "6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4" (UID: "6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4"). InnerVolumeSpecName "kube-api-access-9nhjh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:57:32 crc kubenswrapper[4967]: I1121 15:57:32.299004 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4" (UID: "6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:32 crc kubenswrapper[4967]: I1121 15:57:32.337698 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4" (UID: "6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:32 crc kubenswrapper[4967]: I1121 15:57:32.367550 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-config-data" (OuterVolumeSpecName: "config-data") pod "6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4" (UID: "6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:32 crc kubenswrapper[4967]: I1121 15:57:32.391645 4967 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:32 crc kubenswrapper[4967]: I1121 15:57:32.391702 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9nhjh\" (UniqueName: \"kubernetes.io/projected/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-kube-api-access-9nhjh\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:32 crc kubenswrapper[4967]: I1121 15:57:32.391721 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:32 crc kubenswrapper[4967]: I1121 15:57:32.391732 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:32 crc kubenswrapper[4967]: I1121 15:57:32.746539 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-9b44d55dc-pdsms" event={"ID":"6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4","Type":"ContainerDied","Data":"542636f858f2c65741052ea66bf9657766f5310d7fbfbe2823f622170e6d1307"} Nov 21 15:57:32 crc kubenswrapper[4967]: I1121 15:57:32.746906 4967 scope.go:117] "RemoveContainer" containerID="961522eccfc4e9d4e91fcbe2de3d89286da43ff683414e07f83ddfc024295b66" Nov 21 15:57:32 crc kubenswrapper[4967]: I1121 15:57:32.747110 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-9b44d55dc-pdsms" Nov 21 15:57:32 crc kubenswrapper[4967]: I1121 15:57:32.772986 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-9b44d55dc-pdsms"] Nov 21 15:57:32 crc kubenswrapper[4967]: I1121 15:57:32.782825 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-9b44d55dc-pdsms"] Nov 21 15:57:33 crc kubenswrapper[4967]: I1121 15:57:33.839279 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-cf9748ff4-bql4m" Nov 21 15:57:33 crc kubenswrapper[4967]: I1121 15:57:33.900533 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-59bbb4975-mxdmz"] Nov 21 15:57:33 crc kubenswrapper[4967]: I1121 15:57:33.900786 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-59bbb4975-mxdmz" podUID="f87aa54b-1478-41af-a049-56a703a25f04" containerName="heat-engine" containerID="cri-o://b3be32a588a4c9ebac91ad9de3f0fe829c551fddd0a700a75870cd52e153424d" gracePeriod=60 Nov 21 15:57:34 crc kubenswrapper[4967]: I1121 15:57:34.549062 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4" path="/var/lib/kubelet/pods/6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4/volumes" Nov 21 15:57:36 crc kubenswrapper[4967]: E1121 15:57:36.012231 4967 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b3be32a588a4c9ebac91ad9de3f0fe829c551fddd0a700a75870cd52e153424d" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 21 15:57:36 crc kubenswrapper[4967]: E1121 15:57:36.013528 4967 log.go:32] 
"ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b3be32a588a4c9ebac91ad9de3f0fe829c551fddd0a700a75870cd52e153424d" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 21 15:57:36 crc kubenswrapper[4967]: E1121 15:57:36.015892 4967 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b3be32a588a4c9ebac91ad9de3f0fe829c551fddd0a700a75870cd52e153424d" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 21 15:57:36 crc kubenswrapper[4967]: E1121 15:57:36.015955 4967 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-59bbb4975-mxdmz" podUID="f87aa54b-1478-41af-a049-56a703a25f04" containerName="heat-engine" Nov 21 15:57:40 crc kubenswrapper[4967]: I1121 15:57:40.872837 4967 generic.go:334] "Generic (PLEG): container finished" podID="7e094f7a-bff7-4c67-92a4-f20a6d05e9ff" containerID="b70af11e3e863da51b383110c505b812605a8d1ed939cad3719295564dd19018" exitCode=0 Nov 21 15:57:40 crc kubenswrapper[4967]: I1121 15:57:40.873065 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5859ff54bd-hmqvr" event={"ID":"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff","Type":"ContainerDied","Data":"b70af11e3e863da51b383110c505b812605a8d1ed939cad3719295564dd19018"} Nov 21 15:57:46 crc kubenswrapper[4967]: E1121 15:57:46.012629 4967 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b3be32a588a4c9ebac91ad9de3f0fe829c551fddd0a700a75870cd52e153424d" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 21 15:57:46 crc kubenswrapper[4967]: E1121 15:57:46.015508 4967 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b3be32a588a4c9ebac91ad9de3f0fe829c551fddd0a700a75870cd52e153424d" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 21 15:57:46 crc kubenswrapper[4967]: E1121 15:57:46.019887 4967 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b3be32a588a4c9ebac91ad9de3f0fe829c551fddd0a700a75870cd52e153424d" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 21 15:57:46 crc kubenswrapper[4967]: E1121 15:57:46.019968 4967 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-59bbb4975-mxdmz" podUID="f87aa54b-1478-41af-a049-56a703a25f04" containerName="heat-engine" Nov 21 15:57:46 crc kubenswrapper[4967]: I1121 15:57:46.522773 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 
Nov 21 15:57:46 crc kubenswrapper[4967]: I1121 15:57:46.522836 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 21 15:57:49 crc kubenswrapper[4967]: I1121 15:57:49.724046 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Nov 21 15:57:50 crc kubenswrapper[4967]: I1121 15:57:50.655355 4967 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f] : Timed out while waiting for systemd to remove kubepods-besteffort-pod7c73f5a0_e6d9_439d_be71_aa94fbdc6c4f.slice"
Nov 21 15:57:50 crc kubenswrapper[4967]: E1121 15:57:50.655758 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f] : Timed out while waiting for systemd to remove kubepods-besteffort-pod7c73f5a0_e6d9_439d_be71_aa94fbdc6c4f.slice" pod="openstack/glance-default-internal-api-0" podUID="7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f"
Nov 21 15:57:50 crc kubenswrapper[4967]: I1121 15:57:50.994188 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.027968 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.047361 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.064170 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 21 15:57:51 crc kubenswrapper[4967]: E1121 15:57:51.066485 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4" containerName="heat-api"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.066530 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4" containerName="heat-api"
Nov 21 15:57:51 crc kubenswrapper[4967]: E1121 15:57:51.066545 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfc30aad-8b31-4143-a494-fb327041e699" containerName="heat-cfnapi"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.066551 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfc30aad-8b31-4143-a494-fb327041e699" containerName="heat-cfnapi"
Nov 21 15:57:51 crc kubenswrapper[4967]: E1121 15:57:51.066569 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12732ea1-9536-4f66-8a18-14aec233a88a" containerName="init"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.066574 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="12732ea1-9536-4f66-8a18-14aec233a88a" containerName="init"
Nov 21 15:57:51 crc kubenswrapper[4967]: E1121 15:57:51.066606 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4" containerName="heat-api"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.066612 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4" containerName="heat-api"
Nov 21 15:57:51 crc kubenswrapper[4967]: E1121 15:57:51.066682 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12732ea1-9536-4f66-8a18-14aec233a88a" containerName="dnsmasq-dns"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.066688 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="12732ea1-9536-4f66-8a18-14aec233a88a" containerName="dnsmasq-dns"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.067798 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4" containerName="heat-api"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.067839 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="6485dc15-e7ba-4abd-9a28-73b7ea9aa4b4" containerName="heat-api"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.067849 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfc30aad-8b31-4143-a494-fb327041e699" containerName="heat-cfnapi"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.067862 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="12732ea1-9536-4f66-8a18-14aec233a88a" containerName="dnsmasq-dns"
Nov 21 15:57:51 crc kubenswrapper[4967]: E1121 15:57:51.068135 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfc30aad-8b31-4143-a494-fb327041e699" containerName="heat-cfnapi"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.068143 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfc30aad-8b31-4143-a494-fb327041e699" containerName="heat-cfnapi"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.068752 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfc30aad-8b31-4143-a494-fb327041e699" containerName="heat-cfnapi"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.070356 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.080952 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.081140 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.084172 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.129684 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b52e62c-c848-4f1d-8886-1d799e1d41da-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.129750 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2b52e62c-c848-4f1d-8886-1d799e1d41da-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.129812 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b52e62c-c848-4f1d-8886-1d799e1d41da-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.129868 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.129892 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b52e62c-c848-4f1d-8886-1d799e1d41da-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.129927 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5wvn\" (UniqueName: \"kubernetes.io/projected/2b52e62c-c848-4f1d-8886-1d799e1d41da-kube-api-access-m5wvn\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0"
Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.129976 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2b52e62c-c848-4f1d-8886-1d799e1d41da-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0"
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b52e62c-c848-4f1d-8886-1d799e1d41da-logs\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.232143 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b52e62c-c848-4f1d-8886-1d799e1d41da-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.232211 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2b52e62c-c848-4f1d-8886-1d799e1d41da-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.232271 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b52e62c-c848-4f1d-8886-1d799e1d41da-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.232304 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.232352 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b52e62c-c848-4f1d-8886-1d799e1d41da-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.232403 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5wvn\" (UniqueName: \"kubernetes.io/projected/2b52e62c-c848-4f1d-8886-1d799e1d41da-kube-api-access-m5wvn\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.232443 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2b52e62c-c848-4f1d-8886-1d799e1d41da-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.232523 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b52e62c-c848-4f1d-8886-1d799e1d41da-logs\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.233272 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/glance-default-internal-api-0" Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.274485 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2b52e62c-c848-4f1d-8886-1d799e1d41da-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.275060 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b52e62c-c848-4f1d-8886-1d799e1d41da-logs\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.284300 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5wvn\" (UniqueName: \"kubernetes.io/projected/2b52e62c-c848-4f1d-8886-1d799e1d41da-kube-api-access-m5wvn\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.284323 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2b52e62c-c848-4f1d-8886-1d799e1d41da-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.285973 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b52e62c-c848-4f1d-8886-1d799e1d41da-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.287236 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b52e62c-c848-4f1d-8886-1d799e1d41da-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.288367 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b52e62c-c848-4f1d-8886-1d799e1d41da-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.355833 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"glance-default-internal-api-0\" (UID: \"2b52e62c-c848-4f1d-8886-1d799e1d41da\") " pod="openstack/glance-default-internal-api-0" Nov 21 15:57:51 crc kubenswrapper[4967]: I1121 15:57:51.575209 4967 util.go:30] "No sandbox for pod can be found. 
Nov 21 15:57:52 crc kubenswrapper[4967]: I1121 15:57:52.631740 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f" path="/var/lib/kubelet/pods/7c73f5a0-e6d9-439d-be71-aa94fbdc6c4f/volumes"
Nov 21 15:57:56 crc kubenswrapper[4967]: E1121 15:57:56.011746 4967 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b3be32a588a4c9ebac91ad9de3f0fe829c551fddd0a700a75870cd52e153424d is running failed: container process not found" containerID="b3be32a588a4c9ebac91ad9de3f0fe829c551fddd0a700a75870cd52e153424d" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Nov 21 15:57:56 crc kubenswrapper[4967]: E1121 15:57:56.015579 4967 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b3be32a588a4c9ebac91ad9de3f0fe829c551fddd0a700a75870cd52e153424d is running failed: container process not found" containerID="b3be32a588a4c9ebac91ad9de3f0fe829c551fddd0a700a75870cd52e153424d" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Nov 21 15:57:56 crc kubenswrapper[4967]: E1121 15:57:56.016466 4967 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b3be32a588a4c9ebac91ad9de3f0fe829c551fddd0a700a75870cd52e153424d is running failed: container process not found" containerID="b3be32a588a4c9ebac91ad9de3f0fe829c551fddd0a700a75870cd52e153424d" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Nov 21 15:57:56 crc kubenswrapper[4967]: E1121 15:57:56.016532 4967 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b3be32a588a4c9ebac91ad9de3f0fe829c551fddd0a700a75870cd52e153424d is running failed: container process not found" probeType="Readiness" pod="openstack/heat-engine-59bbb4975-mxdmz" podUID="f87aa54b-1478-41af-a049-56a703a25f04" containerName="heat-engine"
Nov 21 15:57:56 crc kubenswrapper[4967]: I1121 15:57:56.046561 4967 generic.go:334] "Generic (PLEG): container finished" podID="f87aa54b-1478-41af-a049-56a703a25f04" containerID="b3be32a588a4c9ebac91ad9de3f0fe829c551fddd0a700a75870cd52e153424d" exitCode=0
Nov 21 15:57:56 crc kubenswrapper[4967]: I1121 15:57:56.046586 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-59bbb4975-mxdmz" event={"ID":"f87aa54b-1478-41af-a049-56a703a25f04","Type":"ContainerDied","Data":"b3be32a588a4c9ebac91ad9de3f0fe829c551fddd0a700a75870cd52e153424d"}
Nov 21 15:57:56 crc kubenswrapper[4967]: I1121 15:57:56.968284 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 21 15:57:56 crc kubenswrapper[4967]: I1121 15:57:56.968486 4967 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 21 15:57:56 crc kubenswrapper[4967]: I1121 15:57:56.973089 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 21 15:57:57 crc kubenswrapper[4967]: I1121 15:57:57.112536 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5859ff54bd-hmqvr" event={"ID":"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff","Type":"ContainerDied","Data":"b73de64291624c449e3db90ffbad63a6feea0d94ddefc402ece2357ac9267b84"}
Nov 21 15:57:57 crc kubenswrapper[4967]: I1121 15:57:57.112944 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b73de64291624c449e3db90ffbad63a6feea0d94ddefc402ece2357ac9267b84"
Nov 21 15:57:57 crc kubenswrapper[4967]: I1121 15:57:57.139020 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5859ff54bd-hmqvr"
Nov 21 15:57:57 crc kubenswrapper[4967]: I1121 15:57:57.187696 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-httpd-config\") pod \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\" (UID: \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") "
Nov 21 15:57:57 crc kubenswrapper[4967]: I1121 15:57:57.187853 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-ovndb-tls-certs\") pod \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\" (UID: \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") "
Nov 21 15:57:57 crc kubenswrapper[4967]: I1121 15:57:57.188005 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-config\") pod \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\" (UID: \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") "
Nov 21 15:57:57 crc kubenswrapper[4967]: I1121 15:57:57.188102 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srjwm\" (UniqueName: \"kubernetes.io/projected/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-kube-api-access-srjwm\") pod \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\" (UID: \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") "
Nov 21 15:57:57 crc kubenswrapper[4967]: I1121 15:57:57.188184 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-combined-ca-bundle\") pod \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\" (UID: \"7e094f7a-bff7-4c67-92a4-f20a6d05e9ff\") "
Nov 21 15:57:57 crc kubenswrapper[4967]: I1121 15:57:57.254571 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-kube-api-access-srjwm" (OuterVolumeSpecName: "kube-api-access-srjwm") pod "7e094f7a-bff7-4c67-92a4-f20a6d05e9ff" (UID: "7e094f7a-bff7-4c67-92a4-f20a6d05e9ff"). InnerVolumeSpecName "kube-api-access-srjwm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:57:57 crc kubenswrapper[4967]: I1121 15:57:57.278497 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "7e094f7a-bff7-4c67-92a4-f20a6d05e9ff" (UID: "7e094f7a-bff7-4c67-92a4-f20a6d05e9ff"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:57:57 crc kubenswrapper[4967]: I1121 15:57:57.281022 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7e094f7a-bff7-4c67-92a4-f20a6d05e9ff" (UID: "7e094f7a-bff7-4c67-92a4-f20a6d05e9ff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:57 crc kubenswrapper[4967]: I1121 15:57:57.291093 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-config" (OuterVolumeSpecName: "config") pod "7e094f7a-bff7-4c67-92a4-f20a6d05e9ff" (UID: "7e094f7a-bff7-4c67-92a4-f20a6d05e9ff"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:57 crc kubenswrapper[4967]: I1121 15:57:57.291640 4967 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:57 crc kubenswrapper[4967]: I1121 15:57:57.291676 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:57 crc kubenswrapper[4967]: I1121 15:57:57.291688 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srjwm\" (UniqueName: \"kubernetes.io/projected/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-kube-api-access-srjwm\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:57 crc kubenswrapper[4967]: I1121 15:57:57.291702 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:57 crc kubenswrapper[4967]: I1121 15:57:57.368041 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "7e094f7a-bff7-4c67-92a4-f20a6d05e9ff" (UID: "7e094f7a-bff7-4c67-92a4-f20a6d05e9ff"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:57 crc kubenswrapper[4967]: I1121 15:57:57.394124 4967 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:58 crc kubenswrapper[4967]: I1121 15:57:58.129029 4967 generic.go:334] "Generic (PLEG): container finished" podID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerID="d0d46dfbccb8c7577d72d35bff9ad542f713813525634b6e84df4446e99c4865" exitCode=137 Nov 21 15:57:58 crc kubenswrapper[4967]: I1121 15:57:58.129100 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9007259a-cb0c-47a2-8eac-f473b82d4422","Type":"ContainerDied","Data":"d0d46dfbccb8c7577d72d35bff9ad542f713813525634b6e84df4446e99c4865"} Nov 21 15:57:58 crc kubenswrapper[4967]: I1121 15:57:58.129392 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5859ff54bd-hmqvr" Nov 21 15:57:58 crc kubenswrapper[4967]: I1121 15:57:58.165607 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5859ff54bd-hmqvr"] Nov 21 15:57:58 crc kubenswrapper[4967]: I1121 15:57:58.176815 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5859ff54bd-hmqvr"] Nov 21 15:57:58 crc kubenswrapper[4967]: I1121 15:57:58.243426 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 15:57:58 crc kubenswrapper[4967]: I1121 15:57:58.705425 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-59bbb4975-mxdmz" Nov 21 15:57:58 crc kubenswrapper[4967]: I1121 15:57:58.723160 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e094f7a-bff7-4c67-92a4-f20a6d05e9ff" path="/var/lib/kubelet/pods/7e094f7a-bff7-4c67-92a4-f20a6d05e9ff/volumes" Nov 21 15:57:58 crc kubenswrapper[4967]: I1121 15:57:58.904619 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f87aa54b-1478-41af-a049-56a703a25f04-config-data-custom\") pod \"f87aa54b-1478-41af-a049-56a703a25f04\" (UID: \"f87aa54b-1478-41af-a049-56a703a25f04\") " Nov 21 15:57:58 crc kubenswrapper[4967]: I1121 15:57:58.904699 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f87aa54b-1478-41af-a049-56a703a25f04-config-data\") pod \"f87aa54b-1478-41af-a049-56a703a25f04\" (UID: \"f87aa54b-1478-41af-a049-56a703a25f04\") " Nov 21 15:57:58 crc kubenswrapper[4967]: I1121 15:57:58.904828 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9fqx\" (UniqueName: \"kubernetes.io/projected/f87aa54b-1478-41af-a049-56a703a25f04-kube-api-access-w9fqx\") pod \"f87aa54b-1478-41af-a049-56a703a25f04\" (UID: \"f87aa54b-1478-41af-a049-56a703a25f04\") " Nov 21 15:57:58 crc kubenswrapper[4967]: I1121 15:57:58.904856 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f87aa54b-1478-41af-a049-56a703a25f04-combined-ca-bundle\") pod \"f87aa54b-1478-41af-a049-56a703a25f04\" (UID: \"f87aa54b-1478-41af-a049-56a703a25f04\") " Nov 21 15:57:58 crc kubenswrapper[4967]: I1121 15:57:58.915508 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f87aa54b-1478-41af-a049-56a703a25f04-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f87aa54b-1478-41af-a049-56a703a25f04" (UID: "f87aa54b-1478-41af-a049-56a703a25f04"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:59 crc kubenswrapper[4967]: I1121 15:57:59.013549 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f87aa54b-1478-41af-a049-56a703a25f04-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f87aa54b-1478-41af-a049-56a703a25f04" (UID: "f87aa54b-1478-41af-a049-56a703a25f04"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:59 crc kubenswrapper[4967]: I1121 15:57:59.017010 4967 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f87aa54b-1478-41af-a049-56a703a25f04-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:59 crc kubenswrapper[4967]: I1121 15:57:59.017056 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f87aa54b-1478-41af-a049-56a703a25f04-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:59 crc kubenswrapper[4967]: I1121 15:57:59.057450 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f87aa54b-1478-41af-a049-56a703a25f04-kube-api-access-w9fqx" (OuterVolumeSpecName: "kube-api-access-w9fqx") pod "f87aa54b-1478-41af-a049-56a703a25f04" (UID: "f87aa54b-1478-41af-a049-56a703a25f04"). InnerVolumeSpecName "kube-api-access-w9fqx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:57:59 crc kubenswrapper[4967]: I1121 15:57:59.119025 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9fqx\" (UniqueName: \"kubernetes.io/projected/f87aa54b-1478-41af-a049-56a703a25f04-kube-api-access-w9fqx\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:59 crc kubenswrapper[4967]: I1121 15:57:59.146175 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2b52e62c-c848-4f1d-8886-1d799e1d41da","Type":"ContainerStarted","Data":"a8ffc5adf98e0b6a7b21720fb06c4807f62fb3e54028a89372c3655fe401600f"} Nov 21 15:57:59 crc kubenswrapper[4967]: I1121 15:57:59.147473 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-59bbb4975-mxdmz" event={"ID":"f87aa54b-1478-41af-a049-56a703a25f04","Type":"ContainerDied","Data":"ab907e5c8acb67e6d6c9fe0ebf8aa8cd3eeb87f939393c1b0e26eab8ecd5e1cd"} Nov 21 15:57:59 crc kubenswrapper[4967]: I1121 15:57:59.147504 4967 scope.go:117] "RemoveContainer" containerID="b3be32a588a4c9ebac91ad9de3f0fe829c551fddd0a700a75870cd52e153424d" Nov 21 15:57:59 crc kubenswrapper[4967]: I1121 15:57:59.147618 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-59bbb4975-mxdmz" Nov 21 15:57:59 crc kubenswrapper[4967]: I1121 15:57:59.158869 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f87aa54b-1478-41af-a049-56a703a25f04-config-data" (OuterVolumeSpecName: "config-data") pod "f87aa54b-1478-41af-a049-56a703a25f04" (UID: "f87aa54b-1478-41af-a049-56a703a25f04"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:57:59 crc kubenswrapper[4967]: I1121 15:57:59.220404 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f87aa54b-1478-41af-a049-56a703a25f04-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:57:59 crc kubenswrapper[4967]: I1121 15:57:59.479434 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-59bbb4975-mxdmz"] Nov 21 15:57:59 crc kubenswrapper[4967]: I1121 15:57:59.488516 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-59bbb4975-mxdmz"] Nov 21 15:57:59 crc kubenswrapper[4967]: E1121 15:57:59.788858 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified" Nov 21 15:57:59 crc kubenswrapper[4967]: E1121 15:57:59.789217 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:nova-cell0-conductor-db-sync,Image:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CELL_NAME,Value:cell0,ValueFrom:nil,},EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:false,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/kolla/config_files/config.json,SubPath:nova-conductor-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-d82zs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42436,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-cell0-conductor-db-sync-jcvtj_openstack(6adf654d-d462-4c77-98c5-33b5a6bd9e44): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 15:57:59 crc kubenswrapper[4967]: E1121 15:57:59.790490 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-cell0-conductor-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack/nova-cell0-conductor-db-sync-jcvtj" podUID="6adf654d-d462-4c77-98c5-33b5a6bd9e44" Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.163651 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9007259a-cb0c-47a2-8eac-f473b82d4422","Type":"ContainerDied","Data":"6adaf537fed5167d0086a4a94b90a55d30efb02448151c8ca29fb1f81e8f0da7"} Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.164021 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6adaf537fed5167d0086a4a94b90a55d30efb02448151c8ca29fb1f81e8f0da7" Nov 21 15:58:00 crc kubenswrapper[4967]: E1121 15:58:00.168491 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-cell0-conductor-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified\\\"\"" pod="openstack/nova-cell0-conductor-db-sync-jcvtj" podUID="6adf654d-d462-4c77-98c5-33b5a6bd9e44" Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.231383 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.235437 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9007259a-cb0c-47a2-8eac-f473b82d4422-log-httpd\") pod \"9007259a-cb0c-47a2-8eac-f473b82d4422\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.235514 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9007259a-cb0c-47a2-8eac-f473b82d4422-run-httpd\") pod \"9007259a-cb0c-47a2-8eac-f473b82d4422\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.235823 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-scripts\") pod \"9007259a-cb0c-47a2-8eac-f473b82d4422\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.235898 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-sg-core-conf-yaml\") pod \"9007259a-cb0c-47a2-8eac-f473b82d4422\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.236072 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-config-data\") pod \"9007259a-cb0c-47a2-8eac-f473b82d4422\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.236190 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q276s\" (UniqueName: \"kubernetes.io/projected/9007259a-cb0c-47a2-8eac-f473b82d4422-kube-api-access-q276s\") pod \"9007259a-cb0c-47a2-8eac-f473b82d4422\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.236241 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-combined-ca-bundle\") pod \"9007259a-cb0c-47a2-8eac-f473b82d4422\" (UID: \"9007259a-cb0c-47a2-8eac-f473b82d4422\") " Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.237167 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9007259a-cb0c-47a2-8eac-f473b82d4422-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9007259a-cb0c-47a2-8eac-f473b82d4422" (UID: "9007259a-cb0c-47a2-8eac-f473b82d4422"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.237443 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9007259a-cb0c-47a2-8eac-f473b82d4422-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9007259a-cb0c-47a2-8eac-f473b82d4422" (UID: "9007259a-cb0c-47a2-8eac-f473b82d4422"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.241777 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9007259a-cb0c-47a2-8eac-f473b82d4422-kube-api-access-q276s" (OuterVolumeSpecName: "kube-api-access-q276s") pod "9007259a-cb0c-47a2-8eac-f473b82d4422" (UID: "9007259a-cb0c-47a2-8eac-f473b82d4422"). InnerVolumeSpecName "kube-api-access-q276s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.242058 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-scripts" (OuterVolumeSpecName: "scripts") pod "9007259a-cb0c-47a2-8eac-f473b82d4422" (UID: "9007259a-cb0c-47a2-8eac-f473b82d4422"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.278995 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9007259a-cb0c-47a2-8eac-f473b82d4422" (UID: "9007259a-cb0c-47a2-8eac-f473b82d4422"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.338149 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.338187 4967 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.338199 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q276s\" (UniqueName: \"kubernetes.io/projected/9007259a-cb0c-47a2-8eac-f473b82d4422-kube-api-access-q276s\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.338208 4967 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9007259a-cb0c-47a2-8eac-f473b82d4422-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.338218 4967 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9007259a-cb0c-47a2-8eac-f473b82d4422-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.355750 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9007259a-cb0c-47a2-8eac-f473b82d4422" (UID: "9007259a-cb0c-47a2-8eac-f473b82d4422"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.378266 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-config-data" (OuterVolumeSpecName: "config-data") pod "9007259a-cb0c-47a2-8eac-f473b82d4422" (UID: "9007259a-cb0c-47a2-8eac-f473b82d4422"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.440822 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.440864 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9007259a-cb0c-47a2-8eac-f473b82d4422-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:00 crc kubenswrapper[4967]: I1121 15:58:00.548797 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f87aa54b-1478-41af-a049-56a703a25f04" path="/var/lib/kubelet/pods/f87aa54b-1478-41af-a049-56a703a25f04/volumes" Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.192840 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2b52e62c-c848-4f1d-8886-1d799e1d41da","Type":"ContainerStarted","Data":"4931fe1ad49b2efd34d7a8fa17da62bcfff7d38e5616558f94a18b614b80649b"} Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.192900 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.229283 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.243880 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.267257 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:58:01 crc kubenswrapper[4967]: E1121 15:58:01.268054 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e094f7a-bff7-4c67-92a4-f20a6d05e9ff" containerName="neutron-api" Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.268073 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e094f7a-bff7-4c67-92a4-f20a6d05e9ff" containerName="neutron-api" Nov 21 15:58:01 crc kubenswrapper[4967]: E1121 15:58:01.268096 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e094f7a-bff7-4c67-92a4-f20a6d05e9ff" containerName="neutron-httpd" Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.268102 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e094f7a-bff7-4c67-92a4-f20a6d05e9ff" containerName="neutron-httpd" Nov 21 15:58:01 crc kubenswrapper[4967]: E1121 15:58:01.268120 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerName="ceilometer-notification-agent" Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.268127 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerName="ceilometer-notification-agent" Nov 21 15:58:01 crc kubenswrapper[4967]: E1121 15:58:01.268140 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerName="ceilometer-central-agent" Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.268146 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerName="ceilometer-central-agent" Nov 21 15:58:01 crc kubenswrapper[4967]: E1121 15:58:01.268162 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerName="sg-core" Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.268168 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerName="sg-core" Nov 21 15:58:01 crc kubenswrapper[4967]: E1121 15:58:01.268213 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerName="proxy-httpd" Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.268220 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerName="proxy-httpd" Nov 21 15:58:01 crc kubenswrapper[4967]: E1121 15:58:01.268251 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87aa54b-1478-41af-a049-56a703a25f04" containerName="heat-engine" Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.268257 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87aa54b-1478-41af-a049-56a703a25f04" containerName="heat-engine" Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.272455 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerName="sg-core" Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.272507 4967 
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.272514 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerName="proxy-httpd"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.272535 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerName="ceilometer-central-agent"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.272545 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e094f7a-bff7-4c67-92a4-f20a6d05e9ff" containerName="neutron-api"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.272556 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e094f7a-bff7-4c67-92a4-f20a6d05e9ff" containerName="neutron-httpd"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.272569 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="9007259a-cb0c-47a2-8eac-f473b82d4422" containerName="ceilometer-notification-agent"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.274653 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.276736 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.276920 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.281161 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.356698 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bf9e09a-a8e5-4430-bb71-600ade140743-run-httpd\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.356760 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bf9e09a-a8e5-4430-bb71-600ade140743-log-httpd\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.356837 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.357032 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-scripts\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.357103 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wq5nd\" (UniqueName: \"kubernetes.io/projected/7bf9e09a-a8e5-4430-bb71-600ade140743-kube-api-access-wq5nd\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.357208 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-config-data\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.357304 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.459749 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bf9e09a-a8e5-4430-bb71-600ade140743-run-httpd\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.459806 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bf9e09a-a8e5-4430-bb71-600ade140743-log-httpd\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.459880 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.459936 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-scripts\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.459963 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wq5nd\" (UniqueName: \"kubernetes.io/projected/7bf9e09a-a8e5-4430-bb71-600ade140743-kube-api-access-wq5nd\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.460028 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-config-data\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.460064 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0"
Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.460391 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bf9e09a-a8e5-4430-bb71-600ade140743-run-httpd\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0"
\"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bf9e09a-a8e5-4430-bb71-600ade140743-run-httpd\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0" Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.460579 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bf9e09a-a8e5-4430-bb71-600ade140743-log-httpd\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0" Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.464407 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0" Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.465030 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-scripts\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0" Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.467717 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0" Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.474640 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-config-data\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0" Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.480394 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wq5nd\" (UniqueName: \"kubernetes.io/projected/7bf9e09a-a8e5-4430-bb71-600ade140743-kube-api-access-wq5nd\") pod \"ceilometer-0\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " pod="openstack/ceilometer-0" Nov 21 15:58:01 crc kubenswrapper[4967]: I1121 15:58:01.595602 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:58:02 crc kubenswrapper[4967]: I1121 15:58:02.095296 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:58:02 crc kubenswrapper[4967]: W1121 15:58:02.098941 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7bf9e09a_a8e5_4430_bb71_600ade140743.slice/crio-966a6880024bc4a489ead6c33e2ab23fda77cbfd33462be4c285a1c6b5be2f67 WatchSource:0}: Error finding container 966a6880024bc4a489ead6c33e2ab23fda77cbfd33462be4c285a1c6b5be2f67: Status 404 returned error can't find the container with id 966a6880024bc4a489ead6c33e2ab23fda77cbfd33462be4c285a1c6b5be2f67 Nov 21 15:58:02 crc kubenswrapper[4967]: I1121 15:58:02.208632 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2b52e62c-c848-4f1d-8886-1d799e1d41da","Type":"ContainerStarted","Data":"de003f78e942fe8e7d3e676d3350dc5891cd5e35f26d98c1daeb879d82010da1"} Nov 21 15:58:02 crc kubenswrapper[4967]: I1121 15:58:02.211349 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bf9e09a-a8e5-4430-bb71-600ade140743","Type":"ContainerStarted","Data":"966a6880024bc4a489ead6c33e2ab23fda77cbfd33462be4c285a1c6b5be2f67"} Nov 21 15:58:02 crc kubenswrapper[4967]: I1121 15:58:02.244569 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=11.244551032 podStartE2EDuration="11.244551032s" podCreationTimestamp="2025-11-21 15:57:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:58:02.242976876 +0000 UTC m=+1370.501497874" watchObservedRunningTime="2025-11-21 15:58:02.244551032 +0000 UTC m=+1370.503072040" Nov 21 15:58:02 crc kubenswrapper[4967]: I1121 15:58:02.593448 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9007259a-cb0c-47a2-8eac-f473b82d4422" path="/var/lib/kubelet/pods/9007259a-cb0c-47a2-8eac-f473b82d4422/volumes" Nov 21 15:58:10 crc kubenswrapper[4967]: I1121 15:58:10.279937 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:58:11 crc kubenswrapper[4967]: I1121 15:58:11.308300 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bf9e09a-a8e5-4430-bb71-600ade140743","Type":"ContainerStarted","Data":"52ec06095a37e710acfa199493d6e5a478c2803c258d0d23612988f4010e01a1"} Nov 21 15:58:11 crc kubenswrapper[4967]: I1121 15:58:11.577412 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 21 15:58:11 crc kubenswrapper[4967]: I1121 15:58:11.577981 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 21 15:58:12 crc kubenswrapper[4967]: I1121 15:58:12.077748 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 21 15:58:12 crc kubenswrapper[4967]: I1121 15:58:12.078140 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 21 15:58:12 crc kubenswrapper[4967]: I1121 15:58:12.321552 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 21 
Nov 21 15:58:14 crc kubenswrapper[4967]: I1121 15:58:14.341304 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 21 15:58:14 crc kubenswrapper[4967]: I1121 15:58:14.341519 4967 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 21 15:58:14 crc kubenswrapper[4967]: I1121 15:58:14.395008 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 21 15:58:16 crc kubenswrapper[4967]: I1121 15:58:16.522778 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 21 15:58:16 crc kubenswrapper[4967]: I1121 15:58:16.523178 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.401113 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bf9e09a-a8e5-4430-bb71-600ade140743","Type":"ContainerStarted","Data":"f9d11955e07c20672330fd1c05dc728c28837bec473008a759b6a610794c9b6e"}
Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.608048 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-fhn2k"]
Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.610204 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-fhn2k"
Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.620075 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-fhn2k"]
Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.703944 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-7569-account-create-lgmgg"]
Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.705702 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-7569-account-create-lgmgg"
Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.707658 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret"
Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.721172 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f95620e9-c5f1-4947-ac6f-9552b76dc96c-operator-scripts\") pod \"aodh-db-create-fhn2k\" (UID: \"f95620e9-c5f1-4947-ac6f-9552b76dc96c\") " pod="openstack/aodh-db-create-fhn2k"
Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.721478 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtkmm\" (UniqueName: \"kubernetes.io/projected/f95620e9-c5f1-4947-ac6f-9552b76dc96c-kube-api-access-xtkmm\") pod \"aodh-db-create-fhn2k\" (UID: \"f95620e9-c5f1-4947-ac6f-9552b76dc96c\") " pod="openstack/aodh-db-create-fhn2k"
Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.823726 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f95620e9-c5f1-4947-ac6f-9552b76dc96c-operator-scripts\") pod \"aodh-db-create-fhn2k\" (UID: \"f95620e9-c5f1-4947-ac6f-9552b76dc96c\") " pod="openstack/aodh-db-create-fhn2k"
Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.823787 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/862f7b87-b5b9-4c04-a219-b44da3e3b16d-operator-scripts\") pod \"aodh-7569-account-create-lgmgg\" (UID: \"862f7b87-b5b9-4c04-a219-b44da3e3b16d\") " pod="openstack/aodh-7569-account-create-lgmgg"
Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.823927 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9thxg\" (UniqueName: \"kubernetes.io/projected/862f7b87-b5b9-4c04-a219-b44da3e3b16d-kube-api-access-9thxg\") pod \"aodh-7569-account-create-lgmgg\" (UID: \"862f7b87-b5b9-4c04-a219-b44da3e3b16d\") " pod="openstack/aodh-7569-account-create-lgmgg"
Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.823985 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtkmm\" (UniqueName: \"kubernetes.io/projected/f95620e9-c5f1-4947-ac6f-9552b76dc96c-kube-api-access-xtkmm\") pod \"aodh-db-create-fhn2k\" (UID: \"f95620e9-c5f1-4947-ac6f-9552b76dc96c\") " pod="openstack/aodh-db-create-fhn2k"
Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.824872 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f95620e9-c5f1-4947-ac6f-9552b76dc96c-operator-scripts\") pod \"aodh-db-create-fhn2k\" (UID: \"f95620e9-c5f1-4947-ac6f-9552b76dc96c\") " pod="openstack/aodh-db-create-fhn2k"
Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.837322 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-7569-account-create-lgmgg"]
Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.848043 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtkmm\" (UniqueName: \"kubernetes.io/projected/f95620e9-c5f1-4947-ac6f-9552b76dc96c-kube-api-access-xtkmm\") pod \"aodh-db-create-fhn2k\" (UID: \"f95620e9-c5f1-4947-ac6f-9552b76dc96c\") " pod="openstack/aodh-db-create-fhn2k"
Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.927197 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/862f7b87-b5b9-4c04-a219-b44da3e3b16d-operator-scripts\") pod \"aodh-7569-account-create-lgmgg\" (UID: \"862f7b87-b5b9-4c04-a219-b44da3e3b16d\") " pod="openstack/aodh-7569-account-create-lgmgg" Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.927360 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9thxg\" (UniqueName: \"kubernetes.io/projected/862f7b87-b5b9-4c04-a219-b44da3e3b16d-kube-api-access-9thxg\") pod \"aodh-7569-account-create-lgmgg\" (UID: \"862f7b87-b5b9-4c04-a219-b44da3e3b16d\") " pod="openstack/aodh-7569-account-create-lgmgg" Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.928441 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/862f7b87-b5b9-4c04-a219-b44da3e3b16d-operator-scripts\") pod \"aodh-7569-account-create-lgmgg\" (UID: \"862f7b87-b5b9-4c04-a219-b44da3e3b16d\") " pod="openstack/aodh-7569-account-create-lgmgg" Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.933284 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-fhn2k" Nov 21 15:58:20 crc kubenswrapper[4967]: I1121 15:58:20.945396 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9thxg\" (UniqueName: \"kubernetes.io/projected/862f7b87-b5b9-4c04-a219-b44da3e3b16d-kube-api-access-9thxg\") pod \"aodh-7569-account-create-lgmgg\" (UID: \"862f7b87-b5b9-4c04-a219-b44da3e3b16d\") " pod="openstack/aodh-7569-account-create-lgmgg" Nov 21 15:58:21 crc kubenswrapper[4967]: I1121 15:58:21.025734 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-7569-account-create-lgmgg" Nov 21 15:58:21 crc kubenswrapper[4967]: I1121 15:58:21.432849 4967 generic.go:334] "Generic (PLEG): container finished" podID="89017806-bb3a-4c00-b40c-5f600c61ecff" containerID="4e0bfc97a856884e79374721220be718f212cf77f99db97d941188c339190546" exitCode=137 Nov 21 15:58:21 crc kubenswrapper[4967]: I1121 15:58:21.433263 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-65548fddc5-76rgx" event={"ID":"89017806-bb3a-4c00-b40c-5f600c61ecff","Type":"ContainerDied","Data":"4e0bfc97a856884e79374721220be718f212cf77f99db97d941188c339190546"} Nov 21 15:58:21 crc kubenswrapper[4967]: I1121 15:58:21.548064 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-65548fddc5-76rgx" Nov 21 15:58:21 crc kubenswrapper[4967]: I1121 15:58:21.673875 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9kmv\" (UniqueName: \"kubernetes.io/projected/89017806-bb3a-4c00-b40c-5f600c61ecff-kube-api-access-l9kmv\") pod \"89017806-bb3a-4c00-b40c-5f600c61ecff\" (UID: \"89017806-bb3a-4c00-b40c-5f600c61ecff\") " Nov 21 15:58:21 crc kubenswrapper[4967]: I1121 15:58:21.674252 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89017806-bb3a-4c00-b40c-5f600c61ecff-config-data\") pod \"89017806-bb3a-4c00-b40c-5f600c61ecff\" (UID: \"89017806-bb3a-4c00-b40c-5f600c61ecff\") " Nov 21 15:58:21 crc kubenswrapper[4967]: I1121 15:58:21.674514 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/89017806-bb3a-4c00-b40c-5f600c61ecff-config-data-custom\") pod \"89017806-bb3a-4c00-b40c-5f600c61ecff\" (UID: \"89017806-bb3a-4c00-b40c-5f600c61ecff\") " Nov 21 15:58:21 crc kubenswrapper[4967]: I1121 15:58:21.674963 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89017806-bb3a-4c00-b40c-5f600c61ecff-combined-ca-bundle\") pod \"89017806-bb3a-4c00-b40c-5f600c61ecff\" (UID: \"89017806-bb3a-4c00-b40c-5f600c61ecff\") " Nov 21 15:58:21 crc kubenswrapper[4967]: I1121 15:58:21.679636 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89017806-bb3a-4c00-b40c-5f600c61ecff-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "89017806-bb3a-4c00-b40c-5f600c61ecff" (UID: "89017806-bb3a-4c00-b40c-5f600c61ecff"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:21 crc kubenswrapper[4967]: I1121 15:58:21.680062 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89017806-bb3a-4c00-b40c-5f600c61ecff-kube-api-access-l9kmv" (OuterVolumeSpecName: "kube-api-access-l9kmv") pod "89017806-bb3a-4c00-b40c-5f600c61ecff" (UID: "89017806-bb3a-4c00-b40c-5f600c61ecff"). InnerVolumeSpecName "kube-api-access-l9kmv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:58:21 crc kubenswrapper[4967]: I1121 15:58:21.729359 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89017806-bb3a-4c00-b40c-5f600c61ecff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "89017806-bb3a-4c00-b40c-5f600c61ecff" (UID: "89017806-bb3a-4c00-b40c-5f600c61ecff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:21 crc kubenswrapper[4967]: I1121 15:58:21.743562 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89017806-bb3a-4c00-b40c-5f600c61ecff-config-data" (OuterVolumeSpecName: "config-data") pod "89017806-bb3a-4c00-b40c-5f600c61ecff" (UID: "89017806-bb3a-4c00-b40c-5f600c61ecff"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:21 crc kubenswrapper[4967]: I1121 15:58:21.780391 4967 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/89017806-bb3a-4c00-b40c-5f600c61ecff-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:21 crc kubenswrapper[4967]: I1121 15:58:21.780433 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89017806-bb3a-4c00-b40c-5f600c61ecff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:21 crc kubenswrapper[4967]: I1121 15:58:21.780449 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9kmv\" (UniqueName: \"kubernetes.io/projected/89017806-bb3a-4c00-b40c-5f600c61ecff-kube-api-access-l9kmv\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:21 crc kubenswrapper[4967]: I1121 15:58:21.780464 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89017806-bb3a-4c00-b40c-5f600c61ecff-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:21 crc kubenswrapper[4967]: I1121 15:58:21.831761 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-fhn2k"] Nov 21 15:58:21 crc kubenswrapper[4967]: I1121 15:58:21.957253 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-7569-account-create-lgmgg"] Nov 21 15:58:22 crc kubenswrapper[4967]: I1121 15:58:22.447532 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-65548fddc5-76rgx" Nov 21 15:58:22 crc kubenswrapper[4967]: I1121 15:58:22.447539 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-65548fddc5-76rgx" event={"ID":"89017806-bb3a-4c00-b40c-5f600c61ecff","Type":"ContainerDied","Data":"6c4d8594bc08a78f291fb9f45443b02ea3bf0e89d7c53cd017125baf9516b8c1"} Nov 21 15:58:22 crc kubenswrapper[4967]: I1121 15:58:22.448113 4967 scope.go:117] "RemoveContainer" containerID="4e0bfc97a856884e79374721220be718f212cf77f99db97d941188c339190546" Nov 21 15:58:22 crc kubenswrapper[4967]: I1121 15:58:22.450898 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-jcvtj" event={"ID":"6adf654d-d462-4c77-98c5-33b5a6bd9e44","Type":"ContainerStarted","Data":"e45d4e062cfb62dfb5575a82f61551d76d148cd72b894b09b0c7b3734bff241e"} Nov 21 15:58:22 crc kubenswrapper[4967]: I1121 15:58:22.453834 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bf9e09a-a8e5-4430-bb71-600ade140743","Type":"ContainerStarted","Data":"1c5e5590f40c17355a1999cb0202fcc9ed23fd931fe4e24434069ac3c8c31fbb"} Nov 21 15:58:22 crc kubenswrapper[4967]: I1121 15:58:22.458263 4967 generic.go:334] "Generic (PLEG): container finished" podID="f95620e9-c5f1-4947-ac6f-9552b76dc96c" containerID="d2ee832382dc1cc718f6950638826b8fa3c219d55626644d5e70e941327cf3a8" exitCode=0 Nov 21 15:58:22 crc kubenswrapper[4967]: I1121 15:58:22.458364 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-fhn2k" event={"ID":"f95620e9-c5f1-4947-ac6f-9552b76dc96c","Type":"ContainerDied","Data":"d2ee832382dc1cc718f6950638826b8fa3c219d55626644d5e70e941327cf3a8"} Nov 21 15:58:22 crc kubenswrapper[4967]: I1121 15:58:22.458389 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-fhn2k" 
event={"ID":"f95620e9-c5f1-4947-ac6f-9552b76dc96c","Type":"ContainerStarted","Data":"55ab8206f62a2d020a50d31b70ac33e2aea04903bfe2e8e7f2c381d87b618854"} Nov 21 15:58:22 crc kubenswrapper[4967]: I1121 15:58:22.466064 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-7569-account-create-lgmgg" event={"ID":"862f7b87-b5b9-4c04-a219-b44da3e3b16d","Type":"ContainerStarted","Data":"833feda4ee0b6e294c94ad306d993a359e2b8ed014c52490b58f625057a45ad7"} Nov 21 15:58:22 crc kubenswrapper[4967]: I1121 15:58:22.466102 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-7569-account-create-lgmgg" event={"ID":"862f7b87-b5b9-4c04-a219-b44da3e3b16d","Type":"ContainerStarted","Data":"1ad4b8d415b47d8d09d0f0806679cfdc80cb2485a4bd973c8678814fe14060ae"} Nov 21 15:58:22 crc kubenswrapper[4967]: I1121 15:58:22.481070 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-jcvtj" podStartSLOduration=2.713133743 podStartE2EDuration="56.481048777s" podCreationTimestamp="2025-11-21 15:57:26 +0000 UTC" firstStartedPulling="2025-11-21 15:57:27.190386992 +0000 UTC m=+1335.448908000" lastFinishedPulling="2025-11-21 15:58:20.958302026 +0000 UTC m=+1389.216823034" observedRunningTime="2025-11-21 15:58:22.472123345 +0000 UTC m=+1390.730644363" watchObservedRunningTime="2025-11-21 15:58:22.481048777 +0000 UTC m=+1390.739569785" Nov 21 15:58:22 crc kubenswrapper[4967]: I1121 15:58:22.550433 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-65548fddc5-76rgx"] Nov 21 15:58:22 crc kubenswrapper[4967]: I1121 15:58:22.553799 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-65548fddc5-76rgx"] Nov 21 15:58:23 crc kubenswrapper[4967]: I1121 15:58:23.495127 4967 generic.go:334] "Generic (PLEG): container finished" podID="862f7b87-b5b9-4c04-a219-b44da3e3b16d" containerID="833feda4ee0b6e294c94ad306d993a359e2b8ed014c52490b58f625057a45ad7" exitCode=0 Nov 21 15:58:23 crc kubenswrapper[4967]: I1121 15:58:23.495196 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-7569-account-create-lgmgg" event={"ID":"862f7b87-b5b9-4c04-a219-b44da3e3b16d","Type":"ContainerDied","Data":"833feda4ee0b6e294c94ad306d993a359e2b8ed014c52490b58f625057a45ad7"} Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.053428 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-fhn2k" Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.061362 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-7569-account-create-lgmgg" Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.140713 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/862f7b87-b5b9-4c04-a219-b44da3e3b16d-operator-scripts\") pod \"862f7b87-b5b9-4c04-a219-b44da3e3b16d\" (UID: \"862f7b87-b5b9-4c04-a219-b44da3e3b16d\") " Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.140912 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xtkmm\" (UniqueName: \"kubernetes.io/projected/f95620e9-c5f1-4947-ac6f-9552b76dc96c-kube-api-access-xtkmm\") pod \"f95620e9-c5f1-4947-ac6f-9552b76dc96c\" (UID: \"f95620e9-c5f1-4947-ac6f-9552b76dc96c\") " Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.140985 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f95620e9-c5f1-4947-ac6f-9552b76dc96c-operator-scripts\") pod \"f95620e9-c5f1-4947-ac6f-9552b76dc96c\" (UID: \"f95620e9-c5f1-4947-ac6f-9552b76dc96c\") " Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.141079 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9thxg\" (UniqueName: \"kubernetes.io/projected/862f7b87-b5b9-4c04-a219-b44da3e3b16d-kube-api-access-9thxg\") pod \"862f7b87-b5b9-4c04-a219-b44da3e3b16d\" (UID: \"862f7b87-b5b9-4c04-a219-b44da3e3b16d\") " Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.141452 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/862f7b87-b5b9-4c04-a219-b44da3e3b16d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "862f7b87-b5b9-4c04-a219-b44da3e3b16d" (UID: "862f7b87-b5b9-4c04-a219-b44da3e3b16d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.141473 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f95620e9-c5f1-4947-ac6f-9552b76dc96c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f95620e9-c5f1-4947-ac6f-9552b76dc96c" (UID: "f95620e9-c5f1-4947-ac6f-9552b76dc96c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.142589 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/862f7b87-b5b9-4c04-a219-b44da3e3b16d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.142614 4967 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f95620e9-c5f1-4947-ac6f-9552b76dc96c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.146555 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/862f7b87-b5b9-4c04-a219-b44da3e3b16d-kube-api-access-9thxg" (OuterVolumeSpecName: "kube-api-access-9thxg") pod "862f7b87-b5b9-4c04-a219-b44da3e3b16d" (UID: "862f7b87-b5b9-4c04-a219-b44da3e3b16d"). InnerVolumeSpecName "kube-api-access-9thxg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.146604 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f95620e9-c5f1-4947-ac6f-9552b76dc96c-kube-api-access-xtkmm" (OuterVolumeSpecName: "kube-api-access-xtkmm") pod "f95620e9-c5f1-4947-ac6f-9552b76dc96c" (UID: "f95620e9-c5f1-4947-ac6f-9552b76dc96c"). InnerVolumeSpecName "kube-api-access-xtkmm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.244238 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xtkmm\" (UniqueName: \"kubernetes.io/projected/f95620e9-c5f1-4947-ac6f-9552b76dc96c-kube-api-access-xtkmm\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.244276 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9thxg\" (UniqueName: \"kubernetes.io/projected/862f7b87-b5b9-4c04-a219-b44da3e3b16d-kube-api-access-9thxg\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.512159 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-7569-account-create-lgmgg" Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.512175 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-7569-account-create-lgmgg" event={"ID":"862f7b87-b5b9-4c04-a219-b44da3e3b16d","Type":"ContainerDied","Data":"1ad4b8d415b47d8d09d0f0806679cfdc80cb2485a4bd973c8678814fe14060ae"} Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.512625 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ad4b8d415b47d8d09d0f0806679cfdc80cb2485a4bd973c8678814fe14060ae" Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.515169 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bf9e09a-a8e5-4430-bb71-600ade140743","Type":"ContainerStarted","Data":"41dc6688d3df6a08b768d8cd2256b9b21b9daac806428feda3157cd0eaa8d932"} Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.515290 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7bf9e09a-a8e5-4430-bb71-600ade140743" containerName="ceilometer-central-agent" containerID="cri-o://52ec06095a37e710acfa199493d6e5a478c2803c258d0d23612988f4010e01a1" gracePeriod=30 Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.515329 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7bf9e09a-a8e5-4430-bb71-600ade140743" containerName="proxy-httpd" containerID="cri-o://41dc6688d3df6a08b768d8cd2256b9b21b9daac806428feda3157cd0eaa8d932" gracePeriod=30 Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.515304 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.515351 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7bf9e09a-a8e5-4430-bb71-600ade140743" containerName="sg-core" containerID="cri-o://1c5e5590f40c17355a1999cb0202fcc9ed23fd931fe4e24434069ac3c8c31fbb" gracePeriod=30 Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.515387 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7bf9e09a-a8e5-4430-bb71-600ade140743" 
containerName="ceilometer-notification-agent" containerID="cri-o://f9d11955e07c20672330fd1c05dc728c28837bec473008a759b6a610794c9b6e" gracePeriod=30 Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.517995 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-fhn2k" event={"ID":"f95620e9-c5f1-4947-ac6f-9552b76dc96c","Type":"ContainerDied","Data":"55ab8206f62a2d020a50d31b70ac33e2aea04903bfe2e8e7f2c381d87b618854"} Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.518034 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="55ab8206f62a2d020a50d31b70ac33e2aea04903bfe2e8e7f2c381d87b618854" Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.518033 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-fhn2k" Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.542966 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.354670825 podStartE2EDuration="23.542940869s" podCreationTimestamp="2025-11-21 15:58:01 +0000 UTC" firstStartedPulling="2025-11-21 15:58:02.102271929 +0000 UTC m=+1370.360792937" lastFinishedPulling="2025-11-21 15:58:23.290541973 +0000 UTC m=+1391.549062981" observedRunningTime="2025-11-21 15:58:24.536650004 +0000 UTC m=+1392.795171022" watchObservedRunningTime="2025-11-21 15:58:24.542940869 +0000 UTC m=+1392.801461877" Nov 21 15:58:24 crc kubenswrapper[4967]: I1121 15:58:24.552179 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89017806-bb3a-4c00-b40c-5f600c61ecff" path="/var/lib/kubelet/pods/89017806-bb3a-4c00-b40c-5f600c61ecff/volumes" Nov 21 15:58:25 crc kubenswrapper[4967]: I1121 15:58:25.531654 4967 generic.go:334] "Generic (PLEG): container finished" podID="7bf9e09a-a8e5-4430-bb71-600ade140743" containerID="41dc6688d3df6a08b768d8cd2256b9b21b9daac806428feda3157cd0eaa8d932" exitCode=0 Nov 21 15:58:25 crc kubenswrapper[4967]: I1121 15:58:25.531696 4967 generic.go:334] "Generic (PLEG): container finished" podID="7bf9e09a-a8e5-4430-bb71-600ade140743" containerID="1c5e5590f40c17355a1999cb0202fcc9ed23fd931fe4e24434069ac3c8c31fbb" exitCode=2 Nov 21 15:58:25 crc kubenswrapper[4967]: I1121 15:58:25.531707 4967 generic.go:334] "Generic (PLEG): container finished" podID="7bf9e09a-a8e5-4430-bb71-600ade140743" containerID="f9d11955e07c20672330fd1c05dc728c28837bec473008a759b6a610794c9b6e" exitCode=0 Nov 21 15:58:25 crc kubenswrapper[4967]: I1121 15:58:25.531741 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bf9e09a-a8e5-4430-bb71-600ade140743","Type":"ContainerDied","Data":"41dc6688d3df6a08b768d8cd2256b9b21b9daac806428feda3157cd0eaa8d932"} Nov 21 15:58:25 crc kubenswrapper[4967]: I1121 15:58:25.531801 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bf9e09a-a8e5-4430-bb71-600ade140743","Type":"ContainerDied","Data":"1c5e5590f40c17355a1999cb0202fcc9ed23fd931fe4e24434069ac3c8c31fbb"} Nov 21 15:58:25 crc kubenswrapper[4967]: I1121 15:58:25.531812 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bf9e09a-a8e5-4430-bb71-600ade140743","Type":"ContainerDied","Data":"f9d11955e07c20672330fd1c05dc728c28837bec473008a759b6a610794c9b6e"} Nov 21 15:58:25 crc kubenswrapper[4967]: I1121 15:58:25.858436 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-5859ff54bd-hmqvr" 
podUID="7e094f7a-bff7-4c67-92a4-f20a6d05e9ff" containerName="neutron-httpd" probeResult="failure" output="Get \"http://10.217.0.203:9696/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.015370 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-9djfm"] Nov 21 15:58:26 crc kubenswrapper[4967]: E1121 15:58:26.016064 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="862f7b87-b5b9-4c04-a219-b44da3e3b16d" containerName="mariadb-account-create" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.016093 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="862f7b87-b5b9-4c04-a219-b44da3e3b16d" containerName="mariadb-account-create" Nov 21 15:58:26 crc kubenswrapper[4967]: E1121 15:58:26.016130 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89017806-bb3a-4c00-b40c-5f600c61ecff" containerName="heat-api" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.016140 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="89017806-bb3a-4c00-b40c-5f600c61ecff" containerName="heat-api" Nov 21 15:58:26 crc kubenswrapper[4967]: E1121 15:58:26.016157 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f95620e9-c5f1-4947-ac6f-9552b76dc96c" containerName="mariadb-database-create" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.016166 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="f95620e9-c5f1-4947-ac6f-9552b76dc96c" containerName="mariadb-database-create" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.016491 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="89017806-bb3a-4c00-b40c-5f600c61ecff" containerName="heat-api" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.016527 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="862f7b87-b5b9-4c04-a219-b44da3e3b16d" containerName="mariadb-account-create" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.016564 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="f95620e9-c5f1-4947-ac6f-9552b76dc96c" containerName="mariadb-database-create" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.017681 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-9djfm" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.020054 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.021586 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-6bsgb" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.021773 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.021926 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.028555 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-9djfm"] Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.089021 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/612fd1ac-5081-454b-946f-85dff74ddf0c-combined-ca-bundle\") pod \"aodh-db-sync-9djfm\" (UID: \"612fd1ac-5081-454b-946f-85dff74ddf0c\") " pod="openstack/aodh-db-sync-9djfm" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.089207 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/612fd1ac-5081-454b-946f-85dff74ddf0c-config-data\") pod \"aodh-db-sync-9djfm\" (UID: \"612fd1ac-5081-454b-946f-85dff74ddf0c\") " pod="openstack/aodh-db-sync-9djfm" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.089383 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/612fd1ac-5081-454b-946f-85dff74ddf0c-scripts\") pod \"aodh-db-sync-9djfm\" (UID: \"612fd1ac-5081-454b-946f-85dff74ddf0c\") " pod="openstack/aodh-db-sync-9djfm" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.089439 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbqxk\" (UniqueName: \"kubernetes.io/projected/612fd1ac-5081-454b-946f-85dff74ddf0c-kube-api-access-vbqxk\") pod \"aodh-db-sync-9djfm\" (UID: \"612fd1ac-5081-454b-946f-85dff74ddf0c\") " pod="openstack/aodh-db-sync-9djfm" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.191227 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/612fd1ac-5081-454b-946f-85dff74ddf0c-scripts\") pod \"aodh-db-sync-9djfm\" (UID: \"612fd1ac-5081-454b-946f-85dff74ddf0c\") " pod="openstack/aodh-db-sync-9djfm" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.191320 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbqxk\" (UniqueName: \"kubernetes.io/projected/612fd1ac-5081-454b-946f-85dff74ddf0c-kube-api-access-vbqxk\") pod \"aodh-db-sync-9djfm\" (UID: \"612fd1ac-5081-454b-946f-85dff74ddf0c\") " pod="openstack/aodh-db-sync-9djfm" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.191369 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/612fd1ac-5081-454b-946f-85dff74ddf0c-combined-ca-bundle\") pod \"aodh-db-sync-9djfm\" (UID: \"612fd1ac-5081-454b-946f-85dff74ddf0c\") " pod="openstack/aodh-db-sync-9djfm" Nov 21 15:58:26 crc 
kubenswrapper[4967]: I1121 15:58:26.191517 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/612fd1ac-5081-454b-946f-85dff74ddf0c-config-data\") pod \"aodh-db-sync-9djfm\" (UID: \"612fd1ac-5081-454b-946f-85dff74ddf0c\") " pod="openstack/aodh-db-sync-9djfm" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.197342 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/612fd1ac-5081-454b-946f-85dff74ddf0c-scripts\") pod \"aodh-db-sync-9djfm\" (UID: \"612fd1ac-5081-454b-946f-85dff74ddf0c\") " pod="openstack/aodh-db-sync-9djfm" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.197508 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/612fd1ac-5081-454b-946f-85dff74ddf0c-combined-ca-bundle\") pod \"aodh-db-sync-9djfm\" (UID: \"612fd1ac-5081-454b-946f-85dff74ddf0c\") " pod="openstack/aodh-db-sync-9djfm" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.207447 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/612fd1ac-5081-454b-946f-85dff74ddf0c-config-data\") pod \"aodh-db-sync-9djfm\" (UID: \"612fd1ac-5081-454b-946f-85dff74ddf0c\") " pod="openstack/aodh-db-sync-9djfm" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.210189 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbqxk\" (UniqueName: \"kubernetes.io/projected/612fd1ac-5081-454b-946f-85dff74ddf0c-kube-api-access-vbqxk\") pod \"aodh-db-sync-9djfm\" (UID: \"612fd1ac-5081-454b-946f-85dff74ddf0c\") " pod="openstack/aodh-db-sync-9djfm" Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.341679 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-9djfm" Nov 21 15:58:26 crc kubenswrapper[4967]: W1121 15:58:26.876778 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod612fd1ac_5081_454b_946f_85dff74ddf0c.slice/crio-6b04cee3e5776e298e5ec4eafe2ef30fab7235d2e1caab1a1253856776b6dacd WatchSource:0}: Error finding container 6b04cee3e5776e298e5ec4eafe2ef30fab7235d2e1caab1a1253856776b6dacd: Status 404 returned error can't find the container with id 6b04cee3e5776e298e5ec4eafe2ef30fab7235d2e1caab1a1253856776b6dacd Nov 21 15:58:26 crc kubenswrapper[4967]: I1121 15:58:26.880129 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-9djfm"] Nov 21 15:58:27 crc kubenswrapper[4967]: I1121 15:58:27.564846 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-9djfm" event={"ID":"612fd1ac-5081-454b-946f-85dff74ddf0c","Type":"ContainerStarted","Data":"6b04cee3e5776e298e5ec4eafe2ef30fab7235d2e1caab1a1253856776b6dacd"} Nov 21 15:58:29 crc kubenswrapper[4967]: I1121 15:58:29.588760 4967 generic.go:334] "Generic (PLEG): container finished" podID="7bf9e09a-a8e5-4430-bb71-600ade140743" containerID="52ec06095a37e710acfa199493d6e5a478c2803c258d0d23612988f4010e01a1" exitCode=0 Nov 21 15:58:29 crc kubenswrapper[4967]: I1121 15:58:29.588844 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bf9e09a-a8e5-4430-bb71-600ade140743","Type":"ContainerDied","Data":"52ec06095a37e710acfa199493d6e5a478c2803c258d0d23612988f4010e01a1"} Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.242287 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.317662 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bf9e09a-a8e5-4430-bb71-600ade140743-log-httpd\") pod \"7bf9e09a-a8e5-4430-bb71-600ade140743\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.318072 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wq5nd\" (UniqueName: \"kubernetes.io/projected/7bf9e09a-a8e5-4430-bb71-600ade140743-kube-api-access-wq5nd\") pod \"7bf9e09a-a8e5-4430-bb71-600ade140743\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.318182 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-combined-ca-bundle\") pod \"7bf9e09a-a8e5-4430-bb71-600ade140743\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.318234 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-config-data\") pod \"7bf9e09a-a8e5-4430-bb71-600ade140743\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.318275 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-scripts\") pod \"7bf9e09a-a8e5-4430-bb71-600ade140743\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " Nov 21 15:58:31 
crc kubenswrapper[4967]: I1121 15:58:31.318336 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-sg-core-conf-yaml\") pod \"7bf9e09a-a8e5-4430-bb71-600ade140743\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.318431 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bf9e09a-a8e5-4430-bb71-600ade140743-run-httpd\") pod \"7bf9e09a-a8e5-4430-bb71-600ade140743\" (UID: \"7bf9e09a-a8e5-4430-bb71-600ade140743\") " Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.318691 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bf9e09a-a8e5-4430-bb71-600ade140743-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7bf9e09a-a8e5-4430-bb71-600ade140743" (UID: "7bf9e09a-a8e5-4430-bb71-600ade140743"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.319156 4967 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bf9e09a-a8e5-4430-bb71-600ade140743-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.319267 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bf9e09a-a8e5-4430-bb71-600ade140743-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7bf9e09a-a8e5-4430-bb71-600ade140743" (UID: "7bf9e09a-a8e5-4430-bb71-600ade140743"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.324112 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-scripts" (OuterVolumeSpecName: "scripts") pod "7bf9e09a-a8e5-4430-bb71-600ade140743" (UID: "7bf9e09a-a8e5-4430-bb71-600ade140743"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.325044 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bf9e09a-a8e5-4430-bb71-600ade140743-kube-api-access-wq5nd" (OuterVolumeSpecName: "kube-api-access-wq5nd") pod "7bf9e09a-a8e5-4430-bb71-600ade140743" (UID: "7bf9e09a-a8e5-4430-bb71-600ade140743"). InnerVolumeSpecName "kube-api-access-wq5nd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.352384 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7bf9e09a-a8e5-4430-bb71-600ade140743" (UID: "7bf9e09a-a8e5-4430-bb71-600ade140743"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.387987 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-65548fddc5-76rgx" podUID="89017806-bb3a-4c00-b40c-5f600c61ecff" containerName="heat-api" probeResult="failure" output="Get \"http://10.217.0.211:8004/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.410709 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7bf9e09a-a8e5-4430-bb71-600ade140743" (UID: "7bf9e09a-a8e5-4430-bb71-600ade140743"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.421913 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wq5nd\" (UniqueName: \"kubernetes.io/projected/7bf9e09a-a8e5-4430-bb71-600ade140743-kube-api-access-wq5nd\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.421956 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.421970 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.421981 4967 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.421993 4967 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7bf9e09a-a8e5-4430-bb71-600ade140743-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.444243 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-config-data" (OuterVolumeSpecName: "config-data") pod "7bf9e09a-a8e5-4430-bb71-600ade140743" (UID: "7bf9e09a-a8e5-4430-bb71-600ade140743"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.524752 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bf9e09a-a8e5-4430-bb71-600ade140743-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.611515 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-9djfm" event={"ID":"612fd1ac-5081-454b-946f-85dff74ddf0c","Type":"ContainerStarted","Data":"457e37dcd3010e5e4e5e0d136b9876e4dca552466e91884af6e37ff30a286cda"} Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.614443 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7bf9e09a-a8e5-4430-bb71-600ade140743","Type":"ContainerDied","Data":"966a6880024bc4a489ead6c33e2ab23fda77cbfd33462be4c285a1c6b5be2f67"} Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.614501 4967 scope.go:117] "RemoveContainer" containerID="41dc6688d3df6a08b768d8cd2256b9b21b9daac806428feda3157cd0eaa8d932" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.614509 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.641059 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-9djfm" podStartSLOduration=2.4735405249999998 podStartE2EDuration="6.641042462s" podCreationTimestamp="2025-11-21 15:58:25 +0000 UTC" firstStartedPulling="2025-11-21 15:58:26.879295957 +0000 UTC m=+1395.137816965" lastFinishedPulling="2025-11-21 15:58:31.046797894 +0000 UTC m=+1399.305318902" observedRunningTime="2025-11-21 15:58:31.634621503 +0000 UTC m=+1399.893142521" watchObservedRunningTime="2025-11-21 15:58:31.641042462 +0000 UTC m=+1399.899563470" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.645450 4967 scope.go:117] "RemoveContainer" containerID="1c5e5590f40c17355a1999cb0202fcc9ed23fd931fe4e24434069ac3c8c31fbb" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.668398 4967 scope.go:117] "RemoveContainer" containerID="f9d11955e07c20672330fd1c05dc728c28837bec473008a759b6a610794c9b6e" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.673581 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.691272 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.701614 4967 scope.go:117] "RemoveContainer" containerID="52ec06095a37e710acfa199493d6e5a478c2803c258d0d23612988f4010e01a1" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.706737 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:58:31 crc kubenswrapper[4967]: E1121 15:58:31.707465 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bf9e09a-a8e5-4430-bb71-600ade140743" containerName="ceilometer-central-agent" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.707497 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bf9e09a-a8e5-4430-bb71-600ade140743" containerName="ceilometer-central-agent" Nov 21 15:58:31 crc kubenswrapper[4967]: E1121 15:58:31.707521 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bf9e09a-a8e5-4430-bb71-600ade140743" containerName="proxy-httpd" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.707531 4967 
state_mem.go:107] "Deleted CPUSet assignment" podUID="7bf9e09a-a8e5-4430-bb71-600ade140743" containerName="proxy-httpd" Nov 21 15:58:31 crc kubenswrapper[4967]: E1121 15:58:31.707544 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bf9e09a-a8e5-4430-bb71-600ade140743" containerName="ceilometer-notification-agent" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.707553 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bf9e09a-a8e5-4430-bb71-600ade140743" containerName="ceilometer-notification-agent" Nov 21 15:58:31 crc kubenswrapper[4967]: E1121 15:58:31.707580 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bf9e09a-a8e5-4430-bb71-600ade140743" containerName="sg-core" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.707591 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bf9e09a-a8e5-4430-bb71-600ade140743" containerName="sg-core" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.708103 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bf9e09a-a8e5-4430-bb71-600ade140743" containerName="ceilometer-central-agent" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.708127 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bf9e09a-a8e5-4430-bb71-600ade140743" containerName="sg-core" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.708147 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bf9e09a-a8e5-4430-bb71-600ade140743" containerName="proxy-httpd" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.708176 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bf9e09a-a8e5-4430-bb71-600ade140743" containerName="ceilometer-notification-agent" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.711999 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.717741 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.718055 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.718725 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.729933 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.730102 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.730157 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f41b3fb0-7b2a-489d-a288-337011aa8cb4-log-httpd\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.730249 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-scripts\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.730289 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f41b3fb0-7b2a-489d-a288-337011aa8cb4-run-httpd\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.730371 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-config-data\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.730433 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5l94t\" (UniqueName: \"kubernetes.io/projected/f41b3fb0-7b2a-489d-a288-337011aa8cb4-kube-api-access-5l94t\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.831889 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-scripts\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.831951 4967 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f41b3fb0-7b2a-489d-a288-337011aa8cb4-run-httpd\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.831991 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-config-data\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.832020 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5l94t\" (UniqueName: \"kubernetes.io/projected/f41b3fb0-7b2a-489d-a288-337011aa8cb4-kube-api-access-5l94t\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.832067 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.832137 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.832170 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f41b3fb0-7b2a-489d-a288-337011aa8cb4-log-httpd\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.832601 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f41b3fb0-7b2a-489d-a288-337011aa8cb4-run-httpd\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.832698 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f41b3fb0-7b2a-489d-a288-337011aa8cb4-log-httpd\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.836970 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-scripts\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.837668 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.838041 4967 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-config-data\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.839857 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:31 crc kubenswrapper[4967]: I1121 15:58:31.851794 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5l94t\" (UniqueName: \"kubernetes.io/projected/f41b3fb0-7b2a-489d-a288-337011aa8cb4-kube-api-access-5l94t\") pod \"ceilometer-0\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " pod="openstack/ceilometer-0" Nov 21 15:58:32 crc kubenswrapper[4967]: I1121 15:58:32.031988 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:58:32 crc kubenswrapper[4967]: I1121 15:58:32.504259 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:58:32 crc kubenswrapper[4967]: W1121 15:58:32.508748 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf41b3fb0_7b2a_489d_a288_337011aa8cb4.slice/crio-0a520700ac756e321b8eb598bfde4ab426cd66bcb75cb50360af18e95af9c351 WatchSource:0}: Error finding container 0a520700ac756e321b8eb598bfde4ab426cd66bcb75cb50360af18e95af9c351: Status 404 returned error can't find the container with id 0a520700ac756e321b8eb598bfde4ab426cd66bcb75cb50360af18e95af9c351 Nov 21 15:58:32 crc kubenswrapper[4967]: I1121 15:58:32.555079 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bf9e09a-a8e5-4430-bb71-600ade140743" path="/var/lib/kubelet/pods/7bf9e09a-a8e5-4430-bb71-600ade140743/volumes" Nov 21 15:58:32 crc kubenswrapper[4967]: I1121 15:58:32.624099 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f41b3fb0-7b2a-489d-a288-337011aa8cb4","Type":"ContainerStarted","Data":"0a520700ac756e321b8eb598bfde4ab426cd66bcb75cb50360af18e95af9c351"} Nov 21 15:58:33 crc kubenswrapper[4967]: I1121 15:58:33.639642 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f41b3fb0-7b2a-489d-a288-337011aa8cb4","Type":"ContainerStarted","Data":"15bd5691dc70002b55da786e44cc07269318059766d9c6d6f2d7812d3358a6b8"} Nov 21 15:58:34 crc kubenswrapper[4967]: I1121 15:58:34.652113 4967 generic.go:334] "Generic (PLEG): container finished" podID="612fd1ac-5081-454b-946f-85dff74ddf0c" containerID="457e37dcd3010e5e4e5e0d136b9876e4dca552466e91884af6e37ff30a286cda" exitCode=0 Nov 21 15:58:34 crc kubenswrapper[4967]: I1121 15:58:34.652201 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-9djfm" event={"ID":"612fd1ac-5081-454b-946f-85dff74ddf0c","Type":"ContainerDied","Data":"457e37dcd3010e5e4e5e0d136b9876e4dca552466e91884af6e37ff30a286cda"} Nov 21 15:58:34 crc kubenswrapper[4967]: I1121 15:58:34.655197 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f41b3fb0-7b2a-489d-a288-337011aa8cb4","Type":"ContainerStarted","Data":"f215ef7306694e91437ae9a98b304b83b795e4350243b850ddab657b4ea48bf3"} Nov 21 15:58:34 crc kubenswrapper[4967]: I1121 
15:58:34.894403 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fzqdr"] Nov 21 15:58:34 crc kubenswrapper[4967]: I1121 15:58:34.903361 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fzqdr" Nov 21 15:58:34 crc kubenswrapper[4967]: I1121 15:58:34.910406 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fzqdr"] Nov 21 15:58:35 crc kubenswrapper[4967]: I1121 15:58:35.018128 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8fwt\" (UniqueName: \"kubernetes.io/projected/d18f04e1-396e-462a-aa4c-c9caeb4523ed-kube-api-access-z8fwt\") pod \"redhat-operators-fzqdr\" (UID: \"d18f04e1-396e-462a-aa4c-c9caeb4523ed\") " pod="openshift-marketplace/redhat-operators-fzqdr" Nov 21 15:58:35 crc kubenswrapper[4967]: I1121 15:58:35.018241 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d18f04e1-396e-462a-aa4c-c9caeb4523ed-catalog-content\") pod \"redhat-operators-fzqdr\" (UID: \"d18f04e1-396e-462a-aa4c-c9caeb4523ed\") " pod="openshift-marketplace/redhat-operators-fzqdr" Nov 21 15:58:35 crc kubenswrapper[4967]: I1121 15:58:35.018280 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d18f04e1-396e-462a-aa4c-c9caeb4523ed-utilities\") pod \"redhat-operators-fzqdr\" (UID: \"d18f04e1-396e-462a-aa4c-c9caeb4523ed\") " pod="openshift-marketplace/redhat-operators-fzqdr" Nov 21 15:58:35 crc kubenswrapper[4967]: I1121 15:58:35.119953 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8fwt\" (UniqueName: \"kubernetes.io/projected/d18f04e1-396e-462a-aa4c-c9caeb4523ed-kube-api-access-z8fwt\") pod \"redhat-operators-fzqdr\" (UID: \"d18f04e1-396e-462a-aa4c-c9caeb4523ed\") " pod="openshift-marketplace/redhat-operators-fzqdr" Nov 21 15:58:35 crc kubenswrapper[4967]: I1121 15:58:35.120090 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d18f04e1-396e-462a-aa4c-c9caeb4523ed-catalog-content\") pod \"redhat-operators-fzqdr\" (UID: \"d18f04e1-396e-462a-aa4c-c9caeb4523ed\") " pod="openshift-marketplace/redhat-operators-fzqdr" Nov 21 15:58:35 crc kubenswrapper[4967]: I1121 15:58:35.120142 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d18f04e1-396e-462a-aa4c-c9caeb4523ed-utilities\") pod \"redhat-operators-fzqdr\" (UID: \"d18f04e1-396e-462a-aa4c-c9caeb4523ed\") " pod="openshift-marketplace/redhat-operators-fzqdr" Nov 21 15:58:35 crc kubenswrapper[4967]: I1121 15:58:35.120802 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d18f04e1-396e-462a-aa4c-c9caeb4523ed-utilities\") pod \"redhat-operators-fzqdr\" (UID: \"d18f04e1-396e-462a-aa4c-c9caeb4523ed\") " pod="openshift-marketplace/redhat-operators-fzqdr" Nov 21 15:58:35 crc kubenswrapper[4967]: I1121 15:58:35.120903 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d18f04e1-396e-462a-aa4c-c9caeb4523ed-catalog-content\") pod \"redhat-operators-fzqdr\" (UID: 
\"d18f04e1-396e-462a-aa4c-c9caeb4523ed\") " pod="openshift-marketplace/redhat-operators-fzqdr" Nov 21 15:58:35 crc kubenswrapper[4967]: I1121 15:58:35.136329 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8fwt\" (UniqueName: \"kubernetes.io/projected/d18f04e1-396e-462a-aa4c-c9caeb4523ed-kube-api-access-z8fwt\") pod \"redhat-operators-fzqdr\" (UID: \"d18f04e1-396e-462a-aa4c-c9caeb4523ed\") " pod="openshift-marketplace/redhat-operators-fzqdr" Nov 21 15:58:35 crc kubenswrapper[4967]: I1121 15:58:35.360656 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fzqdr" Nov 21 15:58:35 crc kubenswrapper[4967]: I1121 15:58:35.734022 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f41b3fb0-7b2a-489d-a288-337011aa8cb4","Type":"ContainerStarted","Data":"079bad52f69af24d18537da76e6ce7e98d45bdd5e8cbaced24e2ed725adf5a42"} Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.004113 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fzqdr"] Nov 21 15:58:36 crc kubenswrapper[4967]: W1121 15:58:36.007164 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd18f04e1_396e_462a_aa4c_c9caeb4523ed.slice/crio-e7ddda101601c84b79c16fdfa35f82bf682c5d23b7eeb298eb60c1cd22f5def9 WatchSource:0}: Error finding container e7ddda101601c84b79c16fdfa35f82bf682c5d23b7eeb298eb60c1cd22f5def9: Status 404 returned error can't find the container with id e7ddda101601c84b79c16fdfa35f82bf682c5d23b7eeb298eb60c1cd22f5def9 Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.228285 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-9djfm" Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.258290 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vbqxk\" (UniqueName: \"kubernetes.io/projected/612fd1ac-5081-454b-946f-85dff74ddf0c-kube-api-access-vbqxk\") pod \"612fd1ac-5081-454b-946f-85dff74ddf0c\" (UID: \"612fd1ac-5081-454b-946f-85dff74ddf0c\") " Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.258431 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/612fd1ac-5081-454b-946f-85dff74ddf0c-config-data\") pod \"612fd1ac-5081-454b-946f-85dff74ddf0c\" (UID: \"612fd1ac-5081-454b-946f-85dff74ddf0c\") " Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.258477 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/612fd1ac-5081-454b-946f-85dff74ddf0c-combined-ca-bundle\") pod \"612fd1ac-5081-454b-946f-85dff74ddf0c\" (UID: \"612fd1ac-5081-454b-946f-85dff74ddf0c\") " Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.258499 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/612fd1ac-5081-454b-946f-85dff74ddf0c-scripts\") pod \"612fd1ac-5081-454b-946f-85dff74ddf0c\" (UID: \"612fd1ac-5081-454b-946f-85dff74ddf0c\") " Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.270943 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/612fd1ac-5081-454b-946f-85dff74ddf0c-scripts" (OuterVolumeSpecName: "scripts") pod "612fd1ac-5081-454b-946f-85dff74ddf0c" (UID: "612fd1ac-5081-454b-946f-85dff74ddf0c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.271982 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/612fd1ac-5081-454b-946f-85dff74ddf0c-kube-api-access-vbqxk" (OuterVolumeSpecName: "kube-api-access-vbqxk") pod "612fd1ac-5081-454b-946f-85dff74ddf0c" (UID: "612fd1ac-5081-454b-946f-85dff74ddf0c"). InnerVolumeSpecName "kube-api-access-vbqxk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.292396 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/612fd1ac-5081-454b-946f-85dff74ddf0c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "612fd1ac-5081-454b-946f-85dff74ddf0c" (UID: "612fd1ac-5081-454b-946f-85dff74ddf0c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.303248 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/612fd1ac-5081-454b-946f-85dff74ddf0c-config-data" (OuterVolumeSpecName: "config-data") pod "612fd1ac-5081-454b-946f-85dff74ddf0c" (UID: "612fd1ac-5081-454b-946f-85dff74ddf0c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.362505 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vbqxk\" (UniqueName: \"kubernetes.io/projected/612fd1ac-5081-454b-946f-85dff74ddf0c-kube-api-access-vbqxk\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.362554 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/612fd1ac-5081-454b-946f-85dff74ddf0c-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.362570 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/612fd1ac-5081-454b-946f-85dff74ddf0c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.362582 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/612fd1ac-5081-454b-946f-85dff74ddf0c-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.747729 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-9djfm" Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.747733 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-9djfm" event={"ID":"612fd1ac-5081-454b-946f-85dff74ddf0c","Type":"ContainerDied","Data":"6b04cee3e5776e298e5ec4eafe2ef30fab7235d2e1caab1a1253856776b6dacd"} Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.748142 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b04cee3e5776e298e5ec4eafe2ef30fab7235d2e1caab1a1253856776b6dacd" Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.751224 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f41b3fb0-7b2a-489d-a288-337011aa8cb4","Type":"ContainerStarted","Data":"7c8b315206d8e0d39c4645f2297599d8cf0bfe476fe6438ee0755944f214c5bc"} Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.751382 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.753914 4967 generic.go:334] "Generic (PLEG): container finished" podID="d18f04e1-396e-462a-aa4c-c9caeb4523ed" containerID="f4e3c724f3e727c67cb7fe5d2313fb42d9704b48001a9f574fe62b7edf94f02c" exitCode=0 Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.753954 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzqdr" event={"ID":"d18f04e1-396e-462a-aa4c-c9caeb4523ed","Type":"ContainerDied","Data":"f4e3c724f3e727c67cb7fe5d2313fb42d9704b48001a9f574fe62b7edf94f02c"} Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.753997 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzqdr" event={"ID":"d18f04e1-396e-462a-aa4c-c9caeb4523ed","Type":"ContainerStarted","Data":"e7ddda101601c84b79c16fdfa35f82bf682c5d23b7eeb298eb60c1cd22f5def9"} Nov 21 15:58:36 crc kubenswrapper[4967]: I1121 15:58:36.794205 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.8673309310000001 podStartE2EDuration="5.794184762s" podCreationTimestamp="2025-11-21 15:58:31 +0000 UTC" firstStartedPulling="2025-11-21 15:58:32.51215953 +0000 UTC m=+1400.770680558" 
lastFinishedPulling="2025-11-21 15:58:36.439013371 +0000 UTC m=+1404.697534389" observedRunningTime="2025-11-21 15:58:36.776070579 +0000 UTC m=+1405.034591587" watchObservedRunningTime="2025-11-21 15:58:36.794184762 +0000 UTC m=+1405.052705760" Nov 21 15:58:37 crc kubenswrapper[4967]: I1121 15:58:37.766729 4967 generic.go:334] "Generic (PLEG): container finished" podID="6adf654d-d462-4c77-98c5-33b5a6bd9e44" containerID="e45d4e062cfb62dfb5575a82f61551d76d148cd72b894b09b0c7b3734bff241e" exitCode=0 Nov 21 15:58:37 crc kubenswrapper[4967]: I1121 15:58:37.766811 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-jcvtj" event={"ID":"6adf654d-d462-4c77-98c5-33b5a6bd9e44","Type":"ContainerDied","Data":"e45d4e062cfb62dfb5575a82f61551d76d148cd72b894b09b0c7b3734bff241e"} Nov 21 15:58:37 crc kubenswrapper[4967]: I1121 15:58:37.770232 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzqdr" event={"ID":"d18f04e1-396e-462a-aa4c-c9caeb4523ed","Type":"ContainerStarted","Data":"7ed265e832a10ddd90ac9fe8f7a59cc9837d414e49dc7208df4e7da73d800532"} Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.244337 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-jcvtj" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.340267 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6adf654d-d462-4c77-98c5-33b5a6bd9e44-combined-ca-bundle\") pod \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\" (UID: \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\") " Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.340463 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d82zs\" (UniqueName: \"kubernetes.io/projected/6adf654d-d462-4c77-98c5-33b5a6bd9e44-kube-api-access-d82zs\") pod \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\" (UID: \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\") " Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.340552 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6adf654d-d462-4c77-98c5-33b5a6bd9e44-scripts\") pod \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\" (UID: \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\") " Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.340670 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6adf654d-d462-4c77-98c5-33b5a6bd9e44-config-data\") pod \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\" (UID: \"6adf654d-d462-4c77-98c5-33b5a6bd9e44\") " Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.348456 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6adf654d-d462-4c77-98c5-33b5a6bd9e44-kube-api-access-d82zs" (OuterVolumeSpecName: "kube-api-access-d82zs") pod "6adf654d-d462-4c77-98c5-33b5a6bd9e44" (UID: "6adf654d-d462-4c77-98c5-33b5a6bd9e44"). InnerVolumeSpecName "kube-api-access-d82zs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.364081 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6adf654d-d462-4c77-98c5-33b5a6bd9e44-scripts" (OuterVolumeSpecName: "scripts") pod "6adf654d-d462-4c77-98c5-33b5a6bd9e44" (UID: "6adf654d-d462-4c77-98c5-33b5a6bd9e44"). 
InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.414434 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6adf654d-d462-4c77-98c5-33b5a6bd9e44-config-data" (OuterVolumeSpecName: "config-data") pod "6adf654d-d462-4c77-98c5-33b5a6bd9e44" (UID: "6adf654d-d462-4c77-98c5-33b5a6bd9e44"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.435467 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6adf654d-d462-4c77-98c5-33b5a6bd9e44-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6adf654d-d462-4c77-98c5-33b5a6bd9e44" (UID: "6adf654d-d462-4c77-98c5-33b5a6bd9e44"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.445190 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6adf654d-d462-4c77-98c5-33b5a6bd9e44-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.445255 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d82zs\" (UniqueName: \"kubernetes.io/projected/6adf654d-d462-4c77-98c5-33b5a6bd9e44-kube-api-access-d82zs\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.445268 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6adf654d-d462-4c77-98c5-33b5a6bd9e44-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.445279 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6adf654d-d462-4c77-98c5-33b5a6bd9e44-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.795263 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-jcvtj" event={"ID":"6adf654d-d462-4c77-98c5-33b5a6bd9e44","Type":"ContainerDied","Data":"e5bba6d7836b5f74a8a27ff0d9055da911fa00cfdc844cb42a4953f8aca04c7e"} Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.795996 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5bba6d7836b5f74a8a27ff0d9055da911fa00cfdc844cb42a4953f8aca04c7e" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.795331 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-jcvtj" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.892856 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 21 15:58:39 crc kubenswrapper[4967]: E1121 15:58:39.893524 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="612fd1ac-5081-454b-946f-85dff74ddf0c" containerName="aodh-db-sync" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.893541 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="612fd1ac-5081-454b-946f-85dff74ddf0c" containerName="aodh-db-sync" Nov 21 15:58:39 crc kubenswrapper[4967]: E1121 15:58:39.893580 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6adf654d-d462-4c77-98c5-33b5a6bd9e44" containerName="nova-cell0-conductor-db-sync" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.893588 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="6adf654d-d462-4c77-98c5-33b5a6bd9e44" containerName="nova-cell0-conductor-db-sync" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.893877 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="612fd1ac-5081-454b-946f-85dff74ddf0c" containerName="aodh-db-sync" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.893903 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="6adf654d-d462-4c77-98c5-33b5a6bd9e44" containerName="nova-cell0-conductor-db-sync" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.895105 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.899981 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-h4mjw" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.900273 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.918721 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.958912 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnmrw\" (UniqueName: \"kubernetes.io/projected/f3996bc6-3f27-4ff9-bcc8-688a7ffb6991-kube-api-access-dnmrw\") pod \"nova-cell0-conductor-0\" (UID: \"f3996bc6-3f27-4ff9-bcc8-688a7ffb6991\") " pod="openstack/nova-cell0-conductor-0" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.959005 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3996bc6-3f27-4ff9-bcc8-688a7ffb6991-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f3996bc6-3f27-4ff9-bcc8-688a7ffb6991\") " pod="openstack/nova-cell0-conductor-0" Nov 21 15:58:39 crc kubenswrapper[4967]: I1121 15:58:39.959748 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3996bc6-3f27-4ff9-bcc8-688a7ffb6991-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f3996bc6-3f27-4ff9-bcc8-688a7ffb6991\") " pod="openstack/nova-cell0-conductor-0" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.061817 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/f3996bc6-3f27-4ff9-bcc8-688a7ffb6991-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f3996bc6-3f27-4ff9-bcc8-688a7ffb6991\") " pod="openstack/nova-cell0-conductor-0" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.062088 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3996bc6-3f27-4ff9-bcc8-688a7ffb6991-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f3996bc6-3f27-4ff9-bcc8-688a7ffb6991\") " pod="openstack/nova-cell0-conductor-0" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.062177 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnmrw\" (UniqueName: \"kubernetes.io/projected/f3996bc6-3f27-4ff9-bcc8-688a7ffb6991-kube-api-access-dnmrw\") pod \"nova-cell0-conductor-0\" (UID: \"f3996bc6-3f27-4ff9-bcc8-688a7ffb6991\") " pod="openstack/nova-cell0-conductor-0" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.067486 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3996bc6-3f27-4ff9-bcc8-688a7ffb6991-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f3996bc6-3f27-4ff9-bcc8-688a7ffb6991\") " pod="openstack/nova-cell0-conductor-0" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.067522 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3996bc6-3f27-4ff9-bcc8-688a7ffb6991-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f3996bc6-3f27-4ff9-bcc8-688a7ffb6991\") " pod="openstack/nova-cell0-conductor-0" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.080923 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnmrw\" (UniqueName: \"kubernetes.io/projected/f3996bc6-3f27-4ff9-bcc8-688a7ffb6991-kube-api-access-dnmrw\") pod \"nova-cell0-conductor-0\" (UID: \"f3996bc6-3f27-4ff9-bcc8-688a7ffb6991\") " pod="openstack/nova-cell0-conductor-0" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.221947 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.718570 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.721678 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.726485 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.726646 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.727721 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-6bsgb" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.742944 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.779717 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsjcj\" (UniqueName: \"kubernetes.io/projected/4f74f871-6a82-49a3-a9e7-a991a513027b-kube-api-access-qsjcj\") pod \"aodh-0\" (UID: \"4f74f871-6a82-49a3-a9e7-a991a513027b\") " pod="openstack/aodh-0" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.780089 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f74f871-6a82-49a3-a9e7-a991a513027b-scripts\") pod \"aodh-0\" (UID: \"4f74f871-6a82-49a3-a9e7-a991a513027b\") " pod="openstack/aodh-0" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.780143 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f74f871-6a82-49a3-a9e7-a991a513027b-combined-ca-bundle\") pod \"aodh-0\" (UID: \"4f74f871-6a82-49a3-a9e7-a991a513027b\") " pod="openstack/aodh-0" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.780173 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f74f871-6a82-49a3-a9e7-a991a513027b-config-data\") pod \"aodh-0\" (UID: \"4f74f871-6a82-49a3-a9e7-a991a513027b\") " pod="openstack/aodh-0" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.804120 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 21 15:58:40 crc kubenswrapper[4967]: W1121 15:58:40.821421 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3996bc6_3f27_4ff9_bcc8_688a7ffb6991.slice/crio-c61a1e7dcae584bff67047bd741f3499fd2719d30d63c37fa29c07afac74b8f4 WatchSource:0}: Error finding container c61a1e7dcae584bff67047bd741f3499fd2719d30d63c37fa29c07afac74b8f4: Status 404 returned error can't find the container with id c61a1e7dcae584bff67047bd741f3499fd2719d30d63c37fa29c07afac74b8f4 Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.881983 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsjcj\" (UniqueName: \"kubernetes.io/projected/4f74f871-6a82-49a3-a9e7-a991a513027b-kube-api-access-qsjcj\") pod \"aodh-0\" (UID: \"4f74f871-6a82-49a3-a9e7-a991a513027b\") " pod="openstack/aodh-0" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.882046 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f74f871-6a82-49a3-a9e7-a991a513027b-scripts\") pod \"aodh-0\" (UID: \"4f74f871-6a82-49a3-a9e7-a991a513027b\") " pod="openstack/aodh-0" Nov 21 15:58:40 crc 
kubenswrapper[4967]: I1121 15:58:40.882097 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f74f871-6a82-49a3-a9e7-a991a513027b-combined-ca-bundle\") pod \"aodh-0\" (UID: \"4f74f871-6a82-49a3-a9e7-a991a513027b\") " pod="openstack/aodh-0" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.882125 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f74f871-6a82-49a3-a9e7-a991a513027b-config-data\") pod \"aodh-0\" (UID: \"4f74f871-6a82-49a3-a9e7-a991a513027b\") " pod="openstack/aodh-0" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.893721 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f74f871-6a82-49a3-a9e7-a991a513027b-config-data\") pod \"aodh-0\" (UID: \"4f74f871-6a82-49a3-a9e7-a991a513027b\") " pod="openstack/aodh-0" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.897967 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f74f871-6a82-49a3-a9e7-a991a513027b-scripts\") pod \"aodh-0\" (UID: \"4f74f871-6a82-49a3-a9e7-a991a513027b\") " pod="openstack/aodh-0" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.902834 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f74f871-6a82-49a3-a9e7-a991a513027b-combined-ca-bundle\") pod \"aodh-0\" (UID: \"4f74f871-6a82-49a3-a9e7-a991a513027b\") " pod="openstack/aodh-0" Nov 21 15:58:40 crc kubenswrapper[4967]: I1121 15:58:40.918821 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsjcj\" (UniqueName: \"kubernetes.io/projected/4f74f871-6a82-49a3-a9e7-a991a513027b-kube-api-access-qsjcj\") pod \"aodh-0\" (UID: \"4f74f871-6a82-49a3-a9e7-a991a513027b\") " pod="openstack/aodh-0" Nov 21 15:58:41 crc kubenswrapper[4967]: I1121 15:58:41.060600 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 21 15:58:41 crc kubenswrapper[4967]: I1121 15:58:41.566815 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 21 15:58:41 crc kubenswrapper[4967]: I1121 15:58:41.834415 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"4f74f871-6a82-49a3-a9e7-a991a513027b","Type":"ContainerStarted","Data":"853f1050ee1ff7b196c52533e1f0dc0a492fbf9ee5a16c45c2fec6fc5ca10d89"} Nov 21 15:58:41 crc kubenswrapper[4967]: I1121 15:58:41.836031 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f3996bc6-3f27-4ff9-bcc8-688a7ffb6991","Type":"ContainerStarted","Data":"61b4c8e4baa0883fa8e82526965bb9674204747a39e4bddc30f78cbeeb828d4e"} Nov 21 15:58:41 crc kubenswrapper[4967]: I1121 15:58:41.836058 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f3996bc6-3f27-4ff9-bcc8-688a7ffb6991","Type":"ContainerStarted","Data":"c61a1e7dcae584bff67047bd741f3499fd2719d30d63c37fa29c07afac74b8f4"} Nov 21 15:58:42 crc kubenswrapper[4967]: I1121 15:58:42.848127 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 21 15:58:42 crc kubenswrapper[4967]: I1121 15:58:42.885132 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=3.8851105390000003 podStartE2EDuration="3.885110539s" podCreationTimestamp="2025-11-21 15:58:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:58:42.874683203 +0000 UTC m=+1411.133204211" watchObservedRunningTime="2025-11-21 15:58:42.885110539 +0000 UTC m=+1411.143631537" Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.441215 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.441827 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerName="ceilometer-central-agent" containerID="cri-o://15bd5691dc70002b55da786e44cc07269318059766d9c6d6f2d7812d3358a6b8" gracePeriod=30 Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.441892 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerName="ceilometer-notification-agent" containerID="cri-o://f215ef7306694e91437ae9a98b304b83b795e4350243b850ddab657b4ea48bf3" gracePeriod=30 Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.441892 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerName="sg-core" containerID="cri-o://079bad52f69af24d18537da76e6ce7e98d45bdd5e8cbaced24e2ed725adf5a42" gracePeriod=30 Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.441864 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerName="proxy-httpd" containerID="cri-o://7c8b315206d8e0d39c4645f2297599d8cf0bfe476fe6438ee0755944f214c5bc" gracePeriod=30 Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.533716 4967 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-marketplace-2bxgl"] Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.536789 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2bxgl" Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.551538 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2bxgl"] Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.658556 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqxkk\" (UniqueName: \"kubernetes.io/projected/e088bef1-d23d-47a1-b266-6493f8fe0507-kube-api-access-mqxkk\") pod \"redhat-marketplace-2bxgl\" (UID: \"e088bef1-d23d-47a1-b266-6493f8fe0507\") " pod="openshift-marketplace/redhat-marketplace-2bxgl" Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.658683 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e088bef1-d23d-47a1-b266-6493f8fe0507-utilities\") pod \"redhat-marketplace-2bxgl\" (UID: \"e088bef1-d23d-47a1-b266-6493f8fe0507\") " pod="openshift-marketplace/redhat-marketplace-2bxgl" Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.659106 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e088bef1-d23d-47a1-b266-6493f8fe0507-catalog-content\") pod \"redhat-marketplace-2bxgl\" (UID: \"e088bef1-d23d-47a1-b266-6493f8fe0507\") " pod="openshift-marketplace/redhat-marketplace-2bxgl" Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.762360 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqxkk\" (UniqueName: \"kubernetes.io/projected/e088bef1-d23d-47a1-b266-6493f8fe0507-kube-api-access-mqxkk\") pod \"redhat-marketplace-2bxgl\" (UID: \"e088bef1-d23d-47a1-b266-6493f8fe0507\") " pod="openshift-marketplace/redhat-marketplace-2bxgl" Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.762957 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e088bef1-d23d-47a1-b266-6493f8fe0507-utilities\") pod \"redhat-marketplace-2bxgl\" (UID: \"e088bef1-d23d-47a1-b266-6493f8fe0507\") " pod="openshift-marketplace/redhat-marketplace-2bxgl" Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.763241 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e088bef1-d23d-47a1-b266-6493f8fe0507-catalog-content\") pod \"redhat-marketplace-2bxgl\" (UID: \"e088bef1-d23d-47a1-b266-6493f8fe0507\") " pod="openshift-marketplace/redhat-marketplace-2bxgl" Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.763437 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e088bef1-d23d-47a1-b266-6493f8fe0507-utilities\") pod \"redhat-marketplace-2bxgl\" (UID: \"e088bef1-d23d-47a1-b266-6493f8fe0507\") " pod="openshift-marketplace/redhat-marketplace-2bxgl" Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.763914 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e088bef1-d23d-47a1-b266-6493f8fe0507-catalog-content\") pod \"redhat-marketplace-2bxgl\" (UID: \"e088bef1-d23d-47a1-b266-6493f8fe0507\") 
" pod="openshift-marketplace/redhat-marketplace-2bxgl" Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.783765 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqxkk\" (UniqueName: \"kubernetes.io/projected/e088bef1-d23d-47a1-b266-6493f8fe0507-kube-api-access-mqxkk\") pod \"redhat-marketplace-2bxgl\" (UID: \"e088bef1-d23d-47a1-b266-6493f8fe0507\") " pod="openshift-marketplace/redhat-marketplace-2bxgl" Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.873997 4967 generic.go:334] "Generic (PLEG): container finished" podID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerID="7c8b315206d8e0d39c4645f2297599d8cf0bfe476fe6438ee0755944f214c5bc" exitCode=0 Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.874036 4967 generic.go:334] "Generic (PLEG): container finished" podID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerID="079bad52f69af24d18537da76e6ce7e98d45bdd5e8cbaced24e2ed725adf5a42" exitCode=2 Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.874063 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f41b3fb0-7b2a-489d-a288-337011aa8cb4","Type":"ContainerDied","Data":"7c8b315206d8e0d39c4645f2297599d8cf0bfe476fe6438ee0755944f214c5bc"} Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.874106 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f41b3fb0-7b2a-489d-a288-337011aa8cb4","Type":"ContainerDied","Data":"079bad52f69af24d18537da76e6ce7e98d45bdd5e8cbaced24e2ed725adf5a42"} Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.918089 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2bxgl" Nov 21 15:58:43 crc kubenswrapper[4967]: I1121 15:58:43.925490 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 21 15:58:44 crc kubenswrapper[4967]: I1121 15:58:44.638177 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2bxgl"] Nov 21 15:58:44 crc kubenswrapper[4967]: W1121 15:58:44.638651 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode088bef1_d23d_47a1_b266_6493f8fe0507.slice/crio-63cfd74a4c7919cb3d47d70ac3038fbb99584401dfb2ed31236a17bdf6556efb WatchSource:0}: Error finding container 63cfd74a4c7919cb3d47d70ac3038fbb99584401dfb2ed31236a17bdf6556efb: Status 404 returned error can't find the container with id 63cfd74a4c7919cb3d47d70ac3038fbb99584401dfb2ed31236a17bdf6556efb Nov 21 15:58:44 crc kubenswrapper[4967]: I1121 15:58:44.888842 4967 generic.go:334] "Generic (PLEG): container finished" podID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerID="f215ef7306694e91437ae9a98b304b83b795e4350243b850ddab657b4ea48bf3" exitCode=0 Nov 21 15:58:44 crc kubenswrapper[4967]: I1121 15:58:44.888870 4967 generic.go:334] "Generic (PLEG): container finished" podID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerID="15bd5691dc70002b55da786e44cc07269318059766d9c6d6f2d7812d3358a6b8" exitCode=0 Nov 21 15:58:44 crc kubenswrapper[4967]: I1121 15:58:44.888892 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f41b3fb0-7b2a-489d-a288-337011aa8cb4","Type":"ContainerDied","Data":"f215ef7306694e91437ae9a98b304b83b795e4350243b850ddab657b4ea48bf3"} Nov 21 15:58:44 crc kubenswrapper[4967]: I1121 15:58:44.888947 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"f41b3fb0-7b2a-489d-a288-337011aa8cb4","Type":"ContainerDied","Data":"15bd5691dc70002b55da786e44cc07269318059766d9c6d6f2d7812d3358a6b8"} Nov 21 15:58:44 crc kubenswrapper[4967]: I1121 15:58:44.890176 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2bxgl" event={"ID":"e088bef1-d23d-47a1-b266-6493f8fe0507","Type":"ContainerStarted","Data":"63cfd74a4c7919cb3d47d70ac3038fbb99584401dfb2ed31236a17bdf6556efb"} Nov 21 15:58:45 crc kubenswrapper[4967]: I1121 15:58:45.916966 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"4f74f871-6a82-49a3-a9e7-a991a513027b","Type":"ContainerStarted","Data":"9128ac2fa213451d9486f8c34cb9a75e619c0341eb0fa0b5057dde6062413a33"} Nov 21 15:58:45 crc kubenswrapper[4967]: I1121 15:58:45.920865 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f41b3fb0-7b2a-489d-a288-337011aa8cb4","Type":"ContainerDied","Data":"0a520700ac756e321b8eb598bfde4ab426cd66bcb75cb50360af18e95af9c351"} Nov 21 15:58:45 crc kubenswrapper[4967]: I1121 15:58:45.920905 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0a520700ac756e321b8eb598bfde4ab426cd66bcb75cb50360af18e95af9c351" Nov 21 15:58:45 crc kubenswrapper[4967]: I1121 15:58:45.923207 4967 generic.go:334] "Generic (PLEG): container finished" podID="e088bef1-d23d-47a1-b266-6493f8fe0507" containerID="add509dd55319fd519fd276fb0d51a2295e888243aa7ad48f667cf1c8a81b779" exitCode=0 Nov 21 15:58:45 crc kubenswrapper[4967]: I1121 15:58:45.923260 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2bxgl" event={"ID":"e088bef1-d23d-47a1-b266-6493f8fe0507","Type":"ContainerDied","Data":"add509dd55319fd519fd276fb0d51a2295e888243aa7ad48f667cf1c8a81b779"} Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.020414 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.116753 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-config-data\") pod \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.116846 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-scripts\") pod \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.116964 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f41b3fb0-7b2a-489d-a288-337011aa8cb4-run-httpd\") pod \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.117119 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-sg-core-conf-yaml\") pod \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.117164 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-combined-ca-bundle\") pod \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.117253 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5l94t\" (UniqueName: \"kubernetes.io/projected/f41b3fb0-7b2a-489d-a288-337011aa8cb4-kube-api-access-5l94t\") pod \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.117281 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f41b3fb0-7b2a-489d-a288-337011aa8cb4-log-httpd\") pod \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\" (UID: \"f41b3fb0-7b2a-489d-a288-337011aa8cb4\") " Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.117511 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f41b3fb0-7b2a-489d-a288-337011aa8cb4-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f41b3fb0-7b2a-489d-a288-337011aa8cb4" (UID: "f41b3fb0-7b2a-489d-a288-337011aa8cb4"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.118149 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f41b3fb0-7b2a-489d-a288-337011aa8cb4-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f41b3fb0-7b2a-489d-a288-337011aa8cb4" (UID: "f41b3fb0-7b2a-489d-a288-337011aa8cb4"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.118170 4967 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f41b3fb0-7b2a-489d-a288-337011aa8cb4-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.121997 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-scripts" (OuterVolumeSpecName: "scripts") pod "f41b3fb0-7b2a-489d-a288-337011aa8cb4" (UID: "f41b3fb0-7b2a-489d-a288-337011aa8cb4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.135583 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f41b3fb0-7b2a-489d-a288-337011aa8cb4-kube-api-access-5l94t" (OuterVolumeSpecName: "kube-api-access-5l94t") pod "f41b3fb0-7b2a-489d-a288-337011aa8cb4" (UID: "f41b3fb0-7b2a-489d-a288-337011aa8cb4"). InnerVolumeSpecName "kube-api-access-5l94t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.166544 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f41b3fb0-7b2a-489d-a288-337011aa8cb4" (UID: "f41b3fb0-7b2a-489d-a288-337011aa8cb4"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.221495 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5l94t\" (UniqueName: \"kubernetes.io/projected/f41b3fb0-7b2a-489d-a288-337011aa8cb4-kube-api-access-5l94t\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.221533 4967 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f41b3fb0-7b2a-489d-a288-337011aa8cb4-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.221542 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.221551 4967 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.249065 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f41b3fb0-7b2a-489d-a288-337011aa8cb4" (UID: "f41b3fb0-7b2a-489d-a288-337011aa8cb4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.272076 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-config-data" (OuterVolumeSpecName: "config-data") pod "f41b3fb0-7b2a-489d-a288-337011aa8cb4" (UID: "f41b3fb0-7b2a-489d-a288-337011aa8cb4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.323209 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.323243 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f41b3fb0-7b2a-489d-a288-337011aa8cb4-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.522076 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.522132 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.522189 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.523004 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f09ca3cd3c7764210d3de0eccc7f7854f17def33e58cd06023ecb248dfe5b054"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.523064 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://f09ca3cd3c7764210d3de0eccc7f7854f17def33e58cd06023ecb248dfe5b054" gracePeriod=600 Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.933805 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.961173 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.972614 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.987277 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:58:46 crc kubenswrapper[4967]: E1121 15:58:46.987910 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerName="ceilometer-notification-agent" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.987934 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerName="ceilometer-notification-agent" Nov 21 15:58:46 crc kubenswrapper[4967]: E1121 15:58:46.987960 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerName="proxy-httpd" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.987969 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerName="proxy-httpd" Nov 21 15:58:46 crc kubenswrapper[4967]: E1121 15:58:46.987998 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerName="sg-core" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.988007 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerName="sg-core" Nov 21 15:58:46 crc kubenswrapper[4967]: E1121 15:58:46.988025 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerName="ceilometer-central-agent" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.988043 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerName="ceilometer-central-agent" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.988344 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerName="ceilometer-notification-agent" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.988375 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerName="proxy-httpd" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.988417 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerName="sg-core" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.988441 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" containerName="ceilometer-central-agent" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.991014 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.992830 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.995717 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 15:58:46 crc kubenswrapper[4967]: I1121 15:58:46.998125 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.040260 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/268acb69-9196-47c9-8f92-b8b16d63a4a9-run-httpd\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.040339 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.040366 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/268acb69-9196-47c9-8f92-b8b16d63a4a9-log-httpd\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.040403 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-config-data\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.040560 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-scripts\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.040597 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpn4z\" (UniqueName: \"kubernetes.io/projected/268acb69-9196-47c9-8f92-b8b16d63a4a9-kube-api-access-vpn4z\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.040642 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.142380 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 
15:58:47.142443 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/268acb69-9196-47c9-8f92-b8b16d63a4a9-run-httpd\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.142470 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.142492 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/268acb69-9196-47c9-8f92-b8b16d63a4a9-log-httpd\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.142516 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-config-data\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.142648 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-scripts\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.142686 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpn4z\" (UniqueName: \"kubernetes.io/projected/268acb69-9196-47c9-8f92-b8b16d63a4a9-kube-api-access-vpn4z\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.143030 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/268acb69-9196-47c9-8f92-b8b16d63a4a9-log-httpd\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.143486 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/268acb69-9196-47c9-8f92-b8b16d63a4a9-run-httpd\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.147343 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-scripts\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.148724 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.148809 4967 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.149244 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-config-data\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.171099 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpn4z\" (UniqueName: \"kubernetes.io/projected/268acb69-9196-47c9-8f92-b8b16d63a4a9-kube-api-access-vpn4z\") pod \"ceilometer-0\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") " pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.311480 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.796031 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.979618 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="f09ca3cd3c7764210d3de0eccc7f7854f17def33e58cd06023ecb248dfe5b054" exitCode=0 Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.979686 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"f09ca3cd3c7764210d3de0eccc7f7854f17def33e58cd06023ecb248dfe5b054"} Nov 21 15:58:47 crc kubenswrapper[4967]: I1121 15:58:47.979726 4967 scope.go:117] "RemoveContainer" containerID="d0c41fa7ce71cf310016f53428786fb104a174849e89edc64a61d157cdf085ba" Nov 21 15:58:48 crc kubenswrapper[4967]: W1121 15:58:48.438625 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod268acb69_9196_47c9_8f92_b8b16d63a4a9.slice/crio-1cbab925f72ff4b4f3400186b6573feea63645f3fd919099c82f1228fd1783b9 WatchSource:0}: Error finding container 1cbab925f72ff4b4f3400186b6573feea63645f3fd919099c82f1228fd1783b9: Status 404 returned error can't find the container with id 1cbab925f72ff4b4f3400186b6573feea63645f3fd919099c82f1228fd1783b9 Nov 21 15:58:48 crc kubenswrapper[4967]: I1121 15:58:48.552448 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f41b3fb0-7b2a-489d-a288-337011aa8cb4" path="/var/lib/kubelet/pods/f41b3fb0-7b2a-489d-a288-337011aa8cb4/volumes" Nov 21 15:58:48 crc kubenswrapper[4967]: I1121 15:58:48.994215 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"268acb69-9196-47c9-8f92-b8b16d63a4a9","Type":"ContainerStarted","Data":"1cbab925f72ff4b4f3400186b6573feea63645f3fd919099c82f1228fd1783b9"} Nov 21 15:58:50 crc kubenswrapper[4967]: I1121 15:58:50.199156 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kgfq8"] Nov 21 15:58:50 crc kubenswrapper[4967]: I1121 15:58:50.202664 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kgfq8" Nov 21 15:58:50 crc kubenswrapper[4967]: I1121 15:58:50.211258 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kgfq8"] Nov 21 15:58:50 crc kubenswrapper[4967]: I1121 15:58:50.262514 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 21 15:58:50 crc kubenswrapper[4967]: I1121 15:58:50.333352 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e53b7b96-ffc9-40af-b2c4-9c6b835f570d-catalog-content\") pod \"certified-operators-kgfq8\" (UID: \"e53b7b96-ffc9-40af-b2c4-9c6b835f570d\") " pod="openshift-marketplace/certified-operators-kgfq8" Nov 21 15:58:50 crc kubenswrapper[4967]: I1121 15:58:50.333497 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e53b7b96-ffc9-40af-b2c4-9c6b835f570d-utilities\") pod \"certified-operators-kgfq8\" (UID: \"e53b7b96-ffc9-40af-b2c4-9c6b835f570d\") " pod="openshift-marketplace/certified-operators-kgfq8" Nov 21 15:58:50 crc kubenswrapper[4967]: I1121 15:58:50.333554 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctcsd\" (UniqueName: \"kubernetes.io/projected/e53b7b96-ffc9-40af-b2c4-9c6b835f570d-kube-api-access-ctcsd\") pod \"certified-operators-kgfq8\" (UID: \"e53b7b96-ffc9-40af-b2c4-9c6b835f570d\") " pod="openshift-marketplace/certified-operators-kgfq8" Nov 21 15:58:50 crc kubenswrapper[4967]: I1121 15:58:50.435692 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e53b7b96-ffc9-40af-b2c4-9c6b835f570d-catalog-content\") pod \"certified-operators-kgfq8\" (UID: \"e53b7b96-ffc9-40af-b2c4-9c6b835f570d\") " pod="openshift-marketplace/certified-operators-kgfq8" Nov 21 15:58:50 crc kubenswrapper[4967]: I1121 15:58:50.435856 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e53b7b96-ffc9-40af-b2c4-9c6b835f570d-utilities\") pod \"certified-operators-kgfq8\" (UID: \"e53b7b96-ffc9-40af-b2c4-9c6b835f570d\") " pod="openshift-marketplace/certified-operators-kgfq8" Nov 21 15:58:50 crc kubenswrapper[4967]: I1121 15:58:50.435938 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctcsd\" (UniqueName: \"kubernetes.io/projected/e53b7b96-ffc9-40af-b2c4-9c6b835f570d-kube-api-access-ctcsd\") pod \"certified-operators-kgfq8\" (UID: \"e53b7b96-ffc9-40af-b2c4-9c6b835f570d\") " pod="openshift-marketplace/certified-operators-kgfq8" Nov 21 15:58:50 crc kubenswrapper[4967]: I1121 15:58:50.436371 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e53b7b96-ffc9-40af-b2c4-9c6b835f570d-catalog-content\") pod \"certified-operators-kgfq8\" (UID: \"e53b7b96-ffc9-40af-b2c4-9c6b835f570d\") " pod="openshift-marketplace/certified-operators-kgfq8" Nov 21 15:58:50 crc kubenswrapper[4967]: I1121 15:58:50.436428 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e53b7b96-ffc9-40af-b2c4-9c6b835f570d-utilities\") pod \"certified-operators-kgfq8\" (UID: 
\"e53b7b96-ffc9-40af-b2c4-9c6b835f570d\") " pod="openshift-marketplace/certified-operators-kgfq8" Nov 21 15:58:50 crc kubenswrapper[4967]: I1121 15:58:50.455595 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctcsd\" (UniqueName: \"kubernetes.io/projected/e53b7b96-ffc9-40af-b2c4-9c6b835f570d-kube-api-access-ctcsd\") pod \"certified-operators-kgfq8\" (UID: \"e53b7b96-ffc9-40af-b2c4-9c6b835f570d\") " pod="openshift-marketplace/certified-operators-kgfq8" Nov 21 15:58:50 crc kubenswrapper[4967]: I1121 15:58:50.546237 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kgfq8" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.027657 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kgfq8"] Nov 21 15:58:51 crc kubenswrapper[4967]: W1121 15:58:51.040497 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode53b7b96_ffc9_40af_b2c4_9c6b835f570d.slice/crio-683938c72621959684e5d00dc647ab581b546864dee7b2c5409c44c554a1a664 WatchSource:0}: Error finding container 683938c72621959684e5d00dc647ab581b546864dee7b2c5409c44c554a1a664: Status 404 returned error can't find the container with id 683938c72621959684e5d00dc647ab581b546864dee7b2c5409c44c554a1a664 Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.617235 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-f6s7x"] Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.619253 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-f6s7x" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.621809 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.621999 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.631407 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-f6s7x"] Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.762862 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-f6s7x\" (UID: \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\") " pod="openstack/nova-cell0-cell-mapping-f6s7x" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.762991 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-config-data\") pod \"nova-cell0-cell-mapping-f6s7x\" (UID: \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\") " pod="openstack/nova-cell0-cell-mapping-f6s7x" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.763018 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncrgj\" (UniqueName: \"kubernetes.io/projected/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-kube-api-access-ncrgj\") pod \"nova-cell0-cell-mapping-f6s7x\" (UID: \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\") " pod="openstack/nova-cell0-cell-mapping-f6s7x" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 
15:58:51.763117 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-scripts\") pod \"nova-cell0-cell-mapping-f6s7x\" (UID: \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\") " pod="openstack/nova-cell0-cell-mapping-f6s7x" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.870269 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-scripts\") pod \"nova-cell0-cell-mapping-f6s7x\" (UID: \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\") " pod="openstack/nova-cell0-cell-mapping-f6s7x" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.870609 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-f6s7x\" (UID: \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\") " pod="openstack/nova-cell0-cell-mapping-f6s7x" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.870730 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-config-data\") pod \"nova-cell0-cell-mapping-f6s7x\" (UID: \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\") " pod="openstack/nova-cell0-cell-mapping-f6s7x" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.870771 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ncrgj\" (UniqueName: \"kubernetes.io/projected/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-kube-api-access-ncrgj\") pod \"nova-cell0-cell-mapping-f6s7x\" (UID: \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\") " pod="openstack/nova-cell0-cell-mapping-f6s7x" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.887607 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-scripts\") pod \"nova-cell0-cell-mapping-f6s7x\" (UID: \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\") " pod="openstack/nova-cell0-cell-mapping-f6s7x" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.899042 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-z9vxs"] Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.913018 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-config-data\") pod \"nova-cell0-cell-mapping-f6s7x\" (UID: \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\") " pod="openstack/nova-cell0-cell-mapping-f6s7x" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.926391 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-z9vxs"] Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.931101 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncrgj\" (UniqueName: \"kubernetes.io/projected/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-kube-api-access-ncrgj\") pod \"nova-cell0-cell-mapping-f6s7x\" (UID: \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\") " pod="openstack/nova-cell0-cell-mapping-f6s7x" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.931682 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-z9vxs" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.961677 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-f6s7x\" (UID: \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\") " pod="openstack/nova-cell0-cell-mapping-f6s7x" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.969973 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.970203 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.973152 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/827df8c5-068d-48b6-af4d-f971bdacdcb3-config-data\") pod \"nova-cell1-conductor-db-sync-z9vxs\" (UID: \"827df8c5-068d-48b6-af4d-f971bdacdcb3\") " pod="openstack/nova-cell1-conductor-db-sync-z9vxs" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.992110 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/827df8c5-068d-48b6-af4d-f971bdacdcb3-scripts\") pod \"nova-cell1-conductor-db-sync-z9vxs\" (UID: \"827df8c5-068d-48b6-af4d-f971bdacdcb3\") " pod="openstack/nova-cell1-conductor-db-sync-z9vxs" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.992163 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/827df8c5-068d-48b6-af4d-f971bdacdcb3-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-z9vxs\" (UID: \"827df8c5-068d-48b6-af4d-f971bdacdcb3\") " pod="openstack/nova-cell1-conductor-db-sync-z9vxs" Nov 21 15:58:51 crc kubenswrapper[4967]: I1121 15:58:51.992295 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfhxn\" (UniqueName: \"kubernetes.io/projected/827df8c5-068d-48b6-af4d-f971bdacdcb3-kube-api-access-xfhxn\") pod \"nova-cell1-conductor-db-sync-z9vxs\" (UID: \"827df8c5-068d-48b6-af4d-f971bdacdcb3\") " pod="openstack/nova-cell1-conductor-db-sync-z9vxs" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.095575 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/827df8c5-068d-48b6-af4d-f971bdacdcb3-scripts\") pod \"nova-cell1-conductor-db-sync-z9vxs\" (UID: \"827df8c5-068d-48b6-af4d-f971bdacdcb3\") " pod="openstack/nova-cell1-conductor-db-sync-z9vxs" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.095807 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/827df8c5-068d-48b6-af4d-f971bdacdcb3-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-z9vxs\" (UID: \"827df8c5-068d-48b6-af4d-f971bdacdcb3\") " pod="openstack/nova-cell1-conductor-db-sync-z9vxs" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.095941 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfhxn\" (UniqueName: \"kubernetes.io/projected/827df8c5-068d-48b6-af4d-f971bdacdcb3-kube-api-access-xfhxn\") pod 
\"nova-cell1-conductor-db-sync-z9vxs\" (UID: \"827df8c5-068d-48b6-af4d-f971bdacdcb3\") " pod="openstack/nova-cell1-conductor-db-sync-z9vxs" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.096154 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/827df8c5-068d-48b6-af4d-f971bdacdcb3-config-data\") pod \"nova-cell1-conductor-db-sync-z9vxs\" (UID: \"827df8c5-068d-48b6-af4d-f971bdacdcb3\") " pod="openstack/nova-cell1-conductor-db-sync-z9vxs" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.105087 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/827df8c5-068d-48b6-af4d-f971bdacdcb3-config-data\") pod \"nova-cell1-conductor-db-sync-z9vxs\" (UID: \"827df8c5-068d-48b6-af4d-f971bdacdcb3\") " pod="openstack/nova-cell1-conductor-db-sync-z9vxs" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.106377 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/827df8c5-068d-48b6-af4d-f971bdacdcb3-scripts\") pod \"nova-cell1-conductor-db-sync-z9vxs\" (UID: \"827df8c5-068d-48b6-af4d-f971bdacdcb3\") " pod="openstack/nova-cell1-conductor-db-sync-z9vxs" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.117504 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.119373 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.131134 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/827df8c5-068d-48b6-af4d-f971bdacdcb3-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-z9vxs\" (UID: \"827df8c5-068d-48b6-af4d-f971bdacdcb3\") " pod="openstack/nova-cell1-conductor-db-sync-z9vxs" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.132940 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.160549 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfhxn\" (UniqueName: \"kubernetes.io/projected/827df8c5-068d-48b6-af4d-f971bdacdcb3-kube-api-access-xfhxn\") pod \"nova-cell1-conductor-db-sync-z9vxs\" (UID: \"827df8c5-068d-48b6-af4d-f971bdacdcb3\") " pod="openstack/nova-cell1-conductor-db-sync-z9vxs" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.167952 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a"} Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.180281 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.198741 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kgfq8" event={"ID":"e53b7b96-ffc9-40af-b2c4-9c6b835f570d","Type":"ContainerStarted","Data":"683938c72621959684e5d00dc647ab581b546864dee7b2c5409c44c554a1a664"} Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.200914 4967 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gj9k9\" (UniqueName: \"kubernetes.io/projected/ffce645f-18bf-4182-8524-38af2bc17063-kube-api-access-gj9k9\") pod \"nova-cell1-novncproxy-0\" (UID: \"ffce645f-18bf-4182-8524-38af2bc17063\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.201080 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffce645f-18bf-4182-8524-38af2bc17063-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ffce645f-18bf-4182-8524-38af2bc17063\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.201122 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffce645f-18bf-4182-8524-38af2bc17063-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ffce645f-18bf-4182-8524-38af2bc17063\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.229401 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"4f74f871-6a82-49a3-a9e7-a991a513027b","Type":"ContainerStarted","Data":"5c94c4de764d7e09052b910d266c640a80428e60e7ecea0aca91e131d6b1129a"} Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.239074 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-f6s7x" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.262620 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2bxgl" event={"ID":"e088bef1-d23d-47a1-b266-6493f8fe0507","Type":"ContainerStarted","Data":"8a626686d0ac820a64f38a9f9f4f4a22baef94c17eb8be10856ac7f85b955ddc"} Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.265454 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.267418 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.286049 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.305767 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffce645f-18bf-4182-8524-38af2bc17063-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ffce645f-18bf-4182-8524-38af2bc17063\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.305837 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-config-data\") pod \"nova-api-0\" (UID: \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\") " pod="openstack/nova-api-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.305929 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\") " pod="openstack/nova-api-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.305958 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nctxr\" (UniqueName: \"kubernetes.io/projected/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-kube-api-access-nctxr\") pod \"nova-api-0\" (UID: \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\") " pod="openstack/nova-api-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.306474 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gj9k9\" (UniqueName: \"kubernetes.io/projected/ffce645f-18bf-4182-8524-38af2bc17063-kube-api-access-gj9k9\") pod \"nova-cell1-novncproxy-0\" (UID: \"ffce645f-18bf-4182-8524-38af2bc17063\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.306685 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-logs\") pod \"nova-api-0\" (UID: \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\") " pod="openstack/nova-api-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.307053 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffce645f-18bf-4182-8524-38af2bc17063-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ffce645f-18bf-4182-8524-38af2bc17063\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.330285 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffce645f-18bf-4182-8524-38af2bc17063-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ffce645f-18bf-4182-8524-38af2bc17063\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.331857 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffce645f-18bf-4182-8524-38af2bc17063-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ffce645f-18bf-4182-8524-38af2bc17063\") " 
pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.338202 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-z9vxs" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.341349 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.350515 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gj9k9\" (UniqueName: \"kubernetes.io/projected/ffce645f-18bf-4182-8524-38af2bc17063-kube-api-access-gj9k9\") pod \"nova-cell1-novncproxy-0\" (UID: \"ffce645f-18bf-4182-8524-38af2bc17063\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.408638 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-logs\") pod \"nova-api-0\" (UID: \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\") " pod="openstack/nova-api-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.408992 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-config-data\") pod \"nova-api-0\" (UID: \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\") " pod="openstack/nova-api-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.409206 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\") " pod="openstack/nova-api-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.409295 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nctxr\" (UniqueName: \"kubernetes.io/projected/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-kube-api-access-nctxr\") pod \"nova-api-0\" (UID: \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\") " pod="openstack/nova-api-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.410242 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-logs\") pod \"nova-api-0\" (UID: \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\") " pod="openstack/nova-api-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.421212 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-config-data\") pod \"nova-api-0\" (UID: \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\") " pod="openstack/nova-api-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.430651 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.433263 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.441255 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.444453 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\") " pod="openstack/nova-api-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.464589 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nctxr\" (UniqueName: \"kubernetes.io/projected/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-kube-api-access-nctxr\") pod \"nova-api-0\" (UID: \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\") " pod="openstack/nova-api-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.496516 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.512828 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d765a37-c8ac-4fc9-b550-5eaf97deaf09-config-data\") pod \"nova-scheduler-0\" (UID: \"0d765a37-c8ac-4fc9-b550-5eaf97deaf09\") " pod="openstack/nova-scheduler-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.513153 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d765a37-c8ac-4fc9-b550-5eaf97deaf09-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0d765a37-c8ac-4fc9-b550-5eaf97deaf09\") " pod="openstack/nova-scheduler-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.513396 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rv4c4\" (UniqueName: \"kubernetes.io/projected/0d765a37-c8ac-4fc9-b550-5eaf97deaf09-kube-api-access-rv4c4\") pod \"nova-scheduler-0\" (UID: \"0d765a37-c8ac-4fc9-b550-5eaf97deaf09\") " pod="openstack/nova-scheduler-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.615907 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d765a37-c8ac-4fc9-b550-5eaf97deaf09-config-data\") pod \"nova-scheduler-0\" (UID: \"0d765a37-c8ac-4fc9-b550-5eaf97deaf09\") " pod="openstack/nova-scheduler-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.616414 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d765a37-c8ac-4fc9-b550-5eaf97deaf09-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0d765a37-c8ac-4fc9-b550-5eaf97deaf09\") " pod="openstack/nova-scheduler-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.616618 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rv4c4\" (UniqueName: \"kubernetes.io/projected/0d765a37-c8ac-4fc9-b550-5eaf97deaf09-kube-api-access-rv4c4\") pod \"nova-scheduler-0\" (UID: \"0d765a37-c8ac-4fc9-b550-5eaf97deaf09\") " pod="openstack/nova-scheduler-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.631374 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/0d765a37-c8ac-4fc9-b550-5eaf97deaf09-config-data\") pod \"nova-scheduler-0\" (UID: \"0d765a37-c8ac-4fc9-b550-5eaf97deaf09\") " pod="openstack/nova-scheduler-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.639356 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d765a37-c8ac-4fc9-b550-5eaf97deaf09-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0d765a37-c8ac-4fc9-b550-5eaf97deaf09\") " pod="openstack/nova-scheduler-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.673846 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rv4c4\" (UniqueName: \"kubernetes.io/projected/0d765a37-c8ac-4fc9-b550-5eaf97deaf09-kube-api-access-rv4c4\") pod \"nova-scheduler-0\" (UID: \"0d765a37-c8ac-4fc9-b550-5eaf97deaf09\") " pod="openstack/nova-scheduler-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.959695 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.961802 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.961835 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-mmfpv"] Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.962096 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.965483 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.969381 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-mmfpv"] Nov 21 15:58:52 crc kubenswrapper[4967]: I1121 15:58:52.969540 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.015205 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.019946 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.028072 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-ovsdbserver-nb\") pod \"dnsmasq-dns-568d7fd7cf-mmfpv\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.028118 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkgxp\" (UniqueName: \"kubernetes.io/projected/0c85c174-d9ef-4c9a-8afb-7071c90b5578-kube-api-access-nkgxp\") pod \"nova-metadata-0\" (UID: \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\") " pod="openstack/nova-metadata-0" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.028206 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c85c174-d9ef-4c9a-8afb-7071c90b5578-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\") " pod="openstack/nova-metadata-0" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.028238 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-ovsdbserver-sb\") pod \"dnsmasq-dns-568d7fd7cf-mmfpv\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.028289 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-dns-svc\") pod \"dnsmasq-dns-568d7fd7cf-mmfpv\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.028413 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c85c174-d9ef-4c9a-8afb-7071c90b5578-config-data\") pod \"nova-metadata-0\" (UID: \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\") " pod="openstack/nova-metadata-0" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.028440 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-config\") pod \"dnsmasq-dns-568d7fd7cf-mmfpv\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.028507 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c85c174-d9ef-4c9a-8afb-7071c90b5578-logs\") pod \"nova-metadata-0\" (UID: \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\") " pod="openstack/nova-metadata-0" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.028526 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-dns-swift-storage-0\") pod \"dnsmasq-dns-568d7fd7cf-mmfpv\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " 
pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.028745 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zksr\" (UniqueName: \"kubernetes.io/projected/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-kube-api-access-7zksr\") pod \"dnsmasq-dns-568d7fd7cf-mmfpv\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.032520 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.130419 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-dns-svc\") pod \"dnsmasq-dns-568d7fd7cf-mmfpv\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.130516 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c85c174-d9ef-4c9a-8afb-7071c90b5578-config-data\") pod \"nova-metadata-0\" (UID: \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\") " pod="openstack/nova-metadata-0" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.130548 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-config\") pod \"dnsmasq-dns-568d7fd7cf-mmfpv\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.130637 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c85c174-d9ef-4c9a-8afb-7071c90b5578-logs\") pod \"nova-metadata-0\" (UID: \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\") " pod="openstack/nova-metadata-0" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.130666 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-dns-swift-storage-0\") pod \"dnsmasq-dns-568d7fd7cf-mmfpv\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.130778 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zksr\" (UniqueName: \"kubernetes.io/projected/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-kube-api-access-7zksr\") pod \"dnsmasq-dns-568d7fd7cf-mmfpv\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.130827 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-ovsdbserver-nb\") pod \"dnsmasq-dns-568d7fd7cf-mmfpv\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.130862 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkgxp\" (UniqueName: \"kubernetes.io/projected/0c85c174-d9ef-4c9a-8afb-7071c90b5578-kube-api-access-nkgxp\") pod 
\"nova-metadata-0\" (UID: \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\") " pod="openstack/nova-metadata-0" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.131010 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c85c174-d9ef-4c9a-8afb-7071c90b5578-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\") " pod="openstack/nova-metadata-0" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.131063 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-ovsdbserver-sb\") pod \"dnsmasq-dns-568d7fd7cf-mmfpv\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.131360 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-dns-svc\") pod \"dnsmasq-dns-568d7fd7cf-mmfpv\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.131403 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-config\") pod \"dnsmasq-dns-568d7fd7cf-mmfpv\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.132109 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-dns-swift-storage-0\") pod \"dnsmasq-dns-568d7fd7cf-mmfpv\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.132595 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c85c174-d9ef-4c9a-8afb-7071c90b5578-logs\") pod \"nova-metadata-0\" (UID: \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\") " pod="openstack/nova-metadata-0" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.132979 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-ovsdbserver-nb\") pod \"dnsmasq-dns-568d7fd7cf-mmfpv\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.169838 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c85c174-d9ef-4c9a-8afb-7071c90b5578-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\") " pod="openstack/nova-metadata-0" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.170382 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c85c174-d9ef-4c9a-8afb-7071c90b5578-config-data\") pod \"nova-metadata-0\" (UID: \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\") " pod="openstack/nova-metadata-0" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.171000 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-ovsdbserver-sb\") pod \"dnsmasq-dns-568d7fd7cf-mmfpv\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.174028 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zksr\" (UniqueName: \"kubernetes.io/projected/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-kube-api-access-7zksr\") pod \"dnsmasq-dns-568d7fd7cf-mmfpv\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.174890 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkgxp\" (UniqueName: \"kubernetes.io/projected/0c85c174-d9ef-4c9a-8afb-7071c90b5578-kube-api-access-nkgxp\") pod \"nova-metadata-0\" (UID: \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\") " pod="openstack/nova-metadata-0" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.277298 4967 generic.go:334] "Generic (PLEG): container finished" podID="e53b7b96-ffc9-40af-b2c4-9c6b835f570d" containerID="7a9a334eddb8f99bf372b76ec9ef64ee184996973632148f2adefb63a1ba4013" exitCode=0 Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.277355 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kgfq8" event={"ID":"e53b7b96-ffc9-40af-b2c4-9c6b835f570d","Type":"ContainerDied","Data":"7a9a334eddb8f99bf372b76ec9ef64ee184996973632148f2adefb63a1ba4013"} Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.348005 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 15:58:53 crc kubenswrapper[4967]: I1121 15:58:53.357491 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:58:54 crc kubenswrapper[4967]: I1121 15:58:54.133957 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-z9vxs"] Nov 21 15:58:54 crc kubenswrapper[4967]: I1121 15:58:54.315724 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-z9vxs" event={"ID":"827df8c5-068d-48b6-af4d-f971bdacdcb3","Type":"ContainerStarted","Data":"04e9fc1f2936d76821a242f5c1da2360688dd62b1c803fdd1b7b3aaaf62c4b4d"} Nov 21 15:58:54 crc kubenswrapper[4967]: I1121 15:58:54.343438 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-f6s7x"] Nov 21 15:58:54 crc kubenswrapper[4967]: W1121 15:58:54.445399 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2225c0b_f7f0_4d45_80cd_cde7456d6f15.slice/crio-89aba41a4dbda43733e78f0b0979188be257b31f05a1073505479e825019d956 WatchSource:0}: Error finding container 89aba41a4dbda43733e78f0b0979188be257b31f05a1073505479e825019d956: Status 404 returned error can't find the container with id 89aba41a4dbda43733e78f0b0979188be257b31f05a1073505479e825019d956 Nov 21 15:58:54 crc kubenswrapper[4967]: I1121 15:58:54.578455 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 15:58:54 crc kubenswrapper[4967]: I1121 15:58:54.578552 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 15:58:54 crc kubenswrapper[4967]: W1121 15:58:54.585392 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0d765a37_c8ac_4fc9_b550_5eaf97deaf09.slice/crio-e049475e7ce5bcf84033e0e641c0dd3a2055d2a5acd982e69abb985bc6500c20 WatchSource:0}: Error finding container e049475e7ce5bcf84033e0e641c0dd3a2055d2a5acd982e69abb985bc6500c20: Status 404 returned error can't find the container with id e049475e7ce5bcf84033e0e641c0dd3a2055d2a5acd982e69abb985bc6500c20 Nov 21 15:58:54 crc kubenswrapper[4967]: I1121 15:58:54.980707 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 15:58:54 crc kubenswrapper[4967]: W1121 15:58:54.991222 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0b3cab8b_d91c_44b1_8d42_0f0c1aa6a678.slice/crio-d5407c9825d14cac5cbb13dc9f61b45e7151115fa7cdf8e513dd0e67223c7cce WatchSource:0}: Error finding container d5407c9825d14cac5cbb13dc9f61b45e7151115fa7cdf8e513dd0e67223c7cce: Status 404 returned error can't find the container with id d5407c9825d14cac5cbb13dc9f61b45e7151115fa7cdf8e513dd0e67223c7cce Nov 21 15:58:54 crc kubenswrapper[4967]: I1121 15:58:54.993334 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 15:58:55 crc kubenswrapper[4967]: W1121 15:58:55.002788 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7c75d46e_d31e_4505_ba3b_88d50d9bf5dc.slice/crio-bc66b36e6ad60f3f2006e4f8d8c9be6de7d7ee986998a13153be81961609d219 WatchSource:0}: Error finding container bc66b36e6ad60f3f2006e4f8d8c9be6de7d7ee986998a13153be81961609d219: Status 404 returned error can't find the container with id bc66b36e6ad60f3f2006e4f8d8c9be6de7d7ee986998a13153be81961609d219 Nov 21 15:58:55 crc kubenswrapper[4967]: I1121 
15:58:55.008578 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-mmfpv"]
Nov 21 15:58:55 crc kubenswrapper[4967]: I1121 15:58:55.336350 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0c85c174-d9ef-4c9a-8afb-7071c90b5578","Type":"ContainerStarted","Data":"29814839aa4ca581d3c5486ece5b655acb7168192a1935ddf0e8ba3a5e104af3"}
Nov 21 15:58:55 crc kubenswrapper[4967]: I1121 15:58:55.337606 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678","Type":"ContainerStarted","Data":"d5407c9825d14cac5cbb13dc9f61b45e7151115fa7cdf8e513dd0e67223c7cce"}
Nov 21 15:58:55 crc kubenswrapper[4967]: I1121 15:58:55.338467 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" event={"ID":"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc","Type":"ContainerStarted","Data":"bc66b36e6ad60f3f2006e4f8d8c9be6de7d7ee986998a13153be81961609d219"}
Nov 21 15:58:55 crc kubenswrapper[4967]: I1121 15:58:55.340098 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"268acb69-9196-47c9-8f92-b8b16d63a4a9","Type":"ContainerStarted","Data":"35f09dc9af827da1bbff47e78b542bedd436ee6b35880884630666597c3eba6a"}
Nov 21 15:58:55 crc kubenswrapper[4967]: I1121 15:58:55.342473 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0d765a37-c8ac-4fc9-b550-5eaf97deaf09","Type":"ContainerStarted","Data":"e049475e7ce5bcf84033e0e641c0dd3a2055d2a5acd982e69abb985bc6500c20"}
Nov 21 15:58:55 crc kubenswrapper[4967]: I1121 15:58:55.350574 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ffce645f-18bf-4182-8524-38af2bc17063","Type":"ContainerStarted","Data":"ed7c10562e5d24e1f2cf2000a72f3fcf2fa824b54a0ae1d1424e977c3223c8f5"}
Nov 21 15:58:55 crc kubenswrapper[4967]: I1121 15:58:55.358565 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-f6s7x" event={"ID":"c2225c0b-f7f0-4d45-80cd-cde7456d6f15","Type":"ContainerStarted","Data":"89aba41a4dbda43733e78f0b0979188be257b31f05a1073505479e825019d956"}
Nov 21 15:58:56 crc kubenswrapper[4967]: I1121 15:58:56.372940 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-f6s7x" event={"ID":"c2225c0b-f7f0-4d45-80cd-cde7456d6f15","Type":"ContainerStarted","Data":"70b9fd1a634edec4ed2d2f40eb9cad169a2b3356fada7646857b8e48488baa8c"}
Nov 21 15:58:56 crc kubenswrapper[4967]: I1121 15:58:56.375862 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-z9vxs" event={"ID":"827df8c5-068d-48b6-af4d-f971bdacdcb3","Type":"ContainerStarted","Data":"836e3046d9e0e13c969f1e742e556cd8f9ecd3056d0193761cfce2ef1a63c2a9"}
Nov 21 15:58:56 crc kubenswrapper[4967]: I1121 15:58:56.792572 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 21 15:58:56 crc kubenswrapper[4967]: I1121 15:58:56.804167 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 21 15:58:57 crc kubenswrapper[4967]: I1121 15:58:57.398103 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" event={"ID":"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc","Type":"ContainerStarted","Data":"e38ed347585b722223ea42a57aaa3839160fa5237e37d6904b839b60f7b970a0"}
Nov 21 15:58:57 crc kubenswrapper[4967]: I1121 15:58:57.418907 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-f6s7x" podStartSLOduration=6.41888643 podStartE2EDuration="6.41888643s" podCreationTimestamp="2025-11-21 15:58:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:58:57.414088549 +0000 UTC m=+1425.672609557" watchObservedRunningTime="2025-11-21 15:58:57.41888643 +0000 UTC m=+1425.677407438"
Nov 21 15:58:58 crc kubenswrapper[4967]: I1121 15:58:58.413044 4967 generic.go:334] "Generic (PLEG): container finished" podID="7c75d46e-d31e-4505-ba3b-88d50d9bf5dc" containerID="e38ed347585b722223ea42a57aaa3839160fa5237e37d6904b839b60f7b970a0" exitCode=0
Nov 21 15:58:58 crc kubenswrapper[4967]: I1121 15:58:58.413200 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" event={"ID":"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc","Type":"ContainerDied","Data":"e38ed347585b722223ea42a57aaa3839160fa5237e37d6904b839b60f7b970a0"}
Nov 21 15:58:58 crc kubenswrapper[4967]: I1121 15:58:58.466615 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-z9vxs" podStartSLOduration=7.466574197 podStartE2EDuration="7.466574197s" podCreationTimestamp="2025-11-21 15:58:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:58:58.460577791 +0000 UTC m=+1426.719098799" watchObservedRunningTime="2025-11-21 15:58:58.466574197 +0000 UTC m=+1426.725095215"
Nov 21 15:58:59 crc kubenswrapper[4967]: I1121 15:58:59.432784 4967 generic.go:334] "Generic (PLEG): container finished" podID="d18f04e1-396e-462a-aa4c-c9caeb4523ed" containerID="7ed265e832a10ddd90ac9fe8f7a59cc9837d414e49dc7208df4e7da73d800532" exitCode=0
Nov 21 15:58:59 crc kubenswrapper[4967]: I1121 15:58:59.432866 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzqdr" event={"ID":"d18f04e1-396e-462a-aa4c-c9caeb4523ed","Type":"ContainerDied","Data":"7ed265e832a10ddd90ac9fe8f7a59cc9837d414e49dc7208df4e7da73d800532"}
Nov 21 15:58:59 crc kubenswrapper[4967]: I1121 15:58:59.438949 4967 generic.go:334] "Generic (PLEG): container finished" podID="e088bef1-d23d-47a1-b266-6493f8fe0507" containerID="8a626686d0ac820a64f38a9f9f4f4a22baef94c17eb8be10856ac7f85b955ddc" exitCode=0
Nov 21 15:58:59 crc kubenswrapper[4967]: I1121 15:58:59.439008 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2bxgl" event={"ID":"e088bef1-d23d-47a1-b266-6493f8fe0507","Type":"ContainerDied","Data":"8a626686d0ac820a64f38a9f9f4f4a22baef94c17eb8be10856ac7f85b955ddc"}
Nov 21 15:59:00 crc kubenswrapper[4967]: I1121 15:59:00.498418 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" event={"ID":"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc","Type":"ContainerStarted","Data":"08d586ead6747c4d5608371fd684665888e0b4eb12b61b2ed59f0a170ff1f5a7"}
Nov 21 15:59:00 crc kubenswrapper[4967]: I1121 15:59:00.499231 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv"
Nov 21 15:59:00 crc kubenswrapper[4967]: I1121 15:59:00.514422 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"268acb69-9196-47c9-8f92-b8b16d63a4a9","Type":"ContainerStarted","Data":"ae40ea6ce7cc5aced97eb25210f3ca82dd26ae6a372a394bac3ced2a89b54a54"}
Nov 21 15:59:00 crc kubenswrapper[4967]: I1121 15:59:00.524888 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kgfq8" event={"ID":"e53b7b96-ffc9-40af-b2c4-9c6b835f570d","Type":"ContainerStarted","Data":"76566519092bbe06487dbd01a919c80ccf7c2170b6f994a61ac514ae5b87adb6"}
Nov 21 15:59:00 crc kubenswrapper[4967]: I1121 15:59:00.534173 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" podStartSLOduration=8.534150825 podStartE2EDuration="8.534150825s" podCreationTimestamp="2025-11-21 15:58:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:59:00.521634787 +0000 UTC m=+1428.780155795" watchObservedRunningTime="2025-11-21 15:59:00.534150825 +0000 UTC m=+1428.792671833"
Nov 21 15:59:00 crc kubenswrapper[4967]: I1121 15:59:00.591838 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"4f74f871-6a82-49a3-a9e7-a991a513027b","Type":"ContainerStarted","Data":"7e11cbb3057d6975e8df896d8fbe7291ea52ff0553f90dc377f996aa57edaa1d"}
Nov 21 15:59:01 crc kubenswrapper[4967]: I1121 15:59:01.646703 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2bxgl" event={"ID":"e088bef1-d23d-47a1-b266-6493f8fe0507","Type":"ContainerStarted","Data":"680cad6b36778fb418bf89136ac7828862421c211a05cd5f5f442cea202fb7bb"}
Nov 21 15:59:01 crc kubenswrapper[4967]: I1121 15:59:01.664666 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzqdr" event={"ID":"d18f04e1-396e-462a-aa4c-c9caeb4523ed","Type":"ContainerStarted","Data":"3d314cf524106215e2cac2b3828542ece2c19935b810f4fba1e5ca8b5ab25470"}
Nov 21 15:59:01 crc kubenswrapper[4967]: I1121 15:59:01.712880 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2bxgl" podStartSLOduration=4.430178944 podStartE2EDuration="18.712855683s" podCreationTimestamp="2025-11-21 15:58:43 +0000 UTC" firstStartedPulling="2025-11-21 15:58:45.924933467 +0000 UTC m=+1414.183454475" lastFinishedPulling="2025-11-21 15:59:00.207610206 +0000 UTC m=+1428.466131214" observedRunningTime="2025-11-21 15:59:01.709599058 +0000 UTC m=+1429.968120076" watchObservedRunningTime="2025-11-21 15:59:01.712855683 +0000 UTC m=+1429.971376691"
Nov 21 15:59:01 crc kubenswrapper[4967]: I1121 15:59:01.750957 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fzqdr" podStartSLOduration=4.41136345 podStartE2EDuration="27.750933593s" podCreationTimestamp="2025-11-21 15:58:34 +0000 UTC" firstStartedPulling="2025-11-21 15:58:36.755941958 +0000 UTC m=+1405.014462966" lastFinishedPulling="2025-11-21 15:59:00.095512101 +0000 UTC m=+1428.354033109" observedRunningTime="2025-11-21 15:59:01.742873696 +0000 UTC m=+1430.001394724" watchObservedRunningTime="2025-11-21 15:59:01.750933593 +0000 UTC m=+1430.009454601"
Nov 21 15:59:02 crc kubenswrapper[4967]: I1121 15:59:02.698098 4967 generic.go:334] "Generic (PLEG): container finished" podID="e53b7b96-ffc9-40af-b2c4-9c6b835f570d" containerID="76566519092bbe06487dbd01a919c80ccf7c2170b6f994a61ac514ae5b87adb6" exitCode=0
Nov 21 15:59:02 crc kubenswrapper[4967]: I1121 15:59:02.699298 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kgfq8" event={"ID":"e53b7b96-ffc9-40af-b2c4-9c6b835f570d","Type":"ContainerDied","Data":"76566519092bbe06487dbd01a919c80ccf7c2170b6f994a61ac514ae5b87adb6"}
Nov 21 15:59:03 crc kubenswrapper[4967]: I1121 15:59:03.918470 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2bxgl"
Nov 21 15:59:03 crc kubenswrapper[4967]: I1121 15:59:03.918801 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2bxgl"
Nov 21 15:59:04 crc kubenswrapper[4967]: I1121 15:59:04.973440 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-2bxgl" podUID="e088bef1-d23d-47a1-b266-6493f8fe0507" containerName="registry-server" probeResult="failure" output=<
Nov 21 15:59:04 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s
Nov 21 15:59:04 crc kubenswrapper[4967]: >
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.361151 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fzqdr"
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.361510 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fzqdr"
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.775345 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"268acb69-9196-47c9-8f92-b8b16d63a4a9","Type":"ContainerStarted","Data":"1b6d94ab75ffdfca29f547cf87959ee9bfb21419e44c982562e3cd1f8fd4fce8"}
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.786058 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kgfq8" event={"ID":"e53b7b96-ffc9-40af-b2c4-9c6b835f570d","Type":"ContainerStarted","Data":"f21b68ef9551802a6d16b75cd747760a580dd43a72ef36de94c03c3d8717ffc2"}
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.788486 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0d765a37-c8ac-4fc9-b550-5eaf97deaf09","Type":"ContainerStarted","Data":"38d9653dcba48edb17f450b3c3bd73ed3b5349eef0b565b018c83801434c50ae"}
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.793151 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"4f74f871-6a82-49a3-a9e7-a991a513027b","Type":"ContainerStarted","Data":"1a961c45eaba440f4856c5acc727fc7f17824dd8a7966789ff4dae85765040a6"}
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.793255 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerName="aodh-api" containerID="cri-o://9128ac2fa213451d9486f8c34cb9a75e619c0341eb0fa0b5057dde6062413a33" gracePeriod=30
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.793282 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerName="aodh-evaluator" containerID="cri-o://5c94c4de764d7e09052b910d266c640a80428e60e7ecea0aca91e131d6b1129a" gracePeriod=30
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.793324 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerName="aodh-listener" containerID="cri-o://1a961c45eaba440f4856c5acc727fc7f17824dd8a7966789ff4dae85765040a6" gracePeriod=30
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.793290 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerName="aodh-notifier" containerID="cri-o://7e11cbb3057d6975e8df896d8fbe7291ea52ff0553f90dc377f996aa57edaa1d" gracePeriod=30
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.800325 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ffce645f-18bf-4182-8524-38af2bc17063","Type":"ContainerStarted","Data":"561c12afe2562b481f1e8e5e0d200eed29d0232d3993bb2fab59f173a02a32f3"}
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.800377 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="ffce645f-18bf-4182-8524-38af2bc17063" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://561c12afe2562b481f1e8e5e0d200eed29d0232d3993bb2fab59f173a02a32f3" gracePeriod=30
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.803561 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0c85c174-d9ef-4c9a-8afb-7071c90b5578","Type":"ContainerStarted","Data":"1e5741d399e6cdd6f295470a873c05f0c3922675e1e9bc8ab0babede1af7d6e4"}
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.803634 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0c85c174-d9ef-4c9a-8afb-7071c90b5578","Type":"ContainerStarted","Data":"23d64f268e8f784e593190234efc55f15f7a62b253f0edf99d7b774e9c66045d"}
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.803786 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0c85c174-d9ef-4c9a-8afb-7071c90b5578" containerName="nova-metadata-log" containerID="cri-o://23d64f268e8f784e593190234efc55f15f7a62b253f0edf99d7b774e9c66045d" gracePeriod=30
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.803909 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0c85c174-d9ef-4c9a-8afb-7071c90b5578" containerName="nova-metadata-metadata" containerID="cri-o://1e5741d399e6cdd6f295470a873c05f0c3922675e1e9bc8ab0babede1af7d6e4" gracePeriod=30
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.815660 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678","Type":"ContainerStarted","Data":"7962296d7c2ea525500212d73986ab60567159284ca5a5de923b903b6c059fd5"}
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.815897 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678","Type":"ContainerStarted","Data":"db23485f8153cfae569a5d231b63bd47205627f4325b90bfc2d8e0f1217f4e2d"}
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.817001 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-kgfq8" podStartSLOduration=4.480495383 podStartE2EDuration="15.816983917s" podCreationTimestamp="2025-11-21 15:58:50 +0000 UTC" firstStartedPulling="2025-11-21 15:58:53.299852268 +0000 UTC m=+1421.558373276" lastFinishedPulling="2025-11-21 15:59:04.636340812 +0000 UTC m=+1432.894861810" observedRunningTime="2025-11-21 15:59:05.809014992 +0000 UTC m=+1434.067536000" watchObservedRunningTime="2025-11-21 15:59:05.816983917 +0000 UTC m=+1434.075504925"
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.849453 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.9745175 podStartE2EDuration="13.84942949s" podCreationTimestamp="2025-11-21 15:58:52 +0000 UTC" firstStartedPulling="2025-11-21 15:58:54.605619351 +0000 UTC m=+1422.864140359" lastFinishedPulling="2025-11-21 15:59:04.480531341 +0000 UTC m=+1432.739052349" observedRunningTime="2025-11-21 15:59:05.834755619 +0000 UTC m=+1434.093276627" watchObservedRunningTime="2025-11-21 15:59:05.84942949 +0000 UTC m=+1434.107950498"
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.867198 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.7855230300000002 podStartE2EDuration="25.867180742s" podCreationTimestamp="2025-11-21 15:58:40 +0000 UTC" firstStartedPulling="2025-11-21 15:58:41.559005787 +0000 UTC m=+1409.817526795" lastFinishedPulling="2025-11-21 15:59:04.640663499 +0000 UTC m=+1432.899184507" observedRunningTime="2025-11-21 15:59:05.866197443 +0000 UTC m=+1434.124718451" watchObservedRunningTime="2025-11-21 15:59:05.867180742 +0000 UTC m=+1434.125701750"
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.977231 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=4.085998697 podStartE2EDuration="13.977209166s" podCreationTimestamp="2025-11-21 15:58:52 +0000 UTC" firstStartedPulling="2025-11-21 15:58:54.587421576 +0000 UTC m=+1422.845942584" lastFinishedPulling="2025-11-21 15:59:04.478632045 +0000 UTC m=+1432.737153053" observedRunningTime="2025-11-21 15:59:05.906083366 +0000 UTC m=+1434.164604384" watchObservedRunningTime="2025-11-21 15:59:05.977209166 +0000 UTC m=+1434.235730174"
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.982015 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=4.427863606 podStartE2EDuration="13.981999377s" podCreationTimestamp="2025-11-21 15:58:52 +0000 UTC" firstStartedPulling="2025-11-21 15:58:55.000785317 +0000 UTC m=+1423.259306325" lastFinishedPulling="2025-11-21 15:59:04.554921088 +0000 UTC m=+1432.813442096" observedRunningTime="2025-11-21 15:59:05.934774989 +0000 UTC m=+1434.193296007" watchObservedRunningTime="2025-11-21 15:59:05.981999377 +0000 UTC m=+1434.240520385"
Nov 21 15:59:05 crc kubenswrapper[4967]: I1121 15:59:05.995417 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=4.505777436 podStartE2EDuration="13.995398951s" podCreationTimestamp="2025-11-21 15:58:52 +0000 UTC" firstStartedPulling="2025-11-21 15:58:54.994183163 +0000 UTC m=+1423.252704171" lastFinishedPulling="2025-11-21 15:59:04.483804678 +0000 UTC m=+1432.742325686" observedRunningTime="2025-11-21 15:59:05.969715126 +0000 UTC m=+1434.228236134" watchObservedRunningTime="2025-11-21 15:59:05.995398951 +0000 UTC m=+1434.253919959"
Nov 21 15:59:06 crc kubenswrapper[4967]: I1121 15:59:06.450718 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fzqdr" podUID="d18f04e1-396e-462a-aa4c-c9caeb4523ed" containerName="registry-server" probeResult="failure" output=<
Nov 21 15:59:06 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s
Nov 21 15:59:06 crc kubenswrapper[4967]: >
Nov 21 15:59:06 crc kubenswrapper[4967]: I1121 15:59:06.833011 4967 generic.go:334] "Generic (PLEG): container finished" podID="0c85c174-d9ef-4c9a-8afb-7071c90b5578" containerID="23d64f268e8f784e593190234efc55f15f7a62b253f0edf99d7b774e9c66045d" exitCode=143
Nov 21 15:59:06 crc kubenswrapper[4967]: I1121 15:59:06.833116 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0c85c174-d9ef-4c9a-8afb-7071c90b5578","Type":"ContainerDied","Data":"23d64f268e8f784e593190234efc55f15f7a62b253f0edf99d7b774e9c66045d"}
Nov 21 15:59:06 crc kubenswrapper[4967]: I1121 15:59:06.835507 4967 generic.go:334] "Generic (PLEG): container finished" podID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerID="5c94c4de764d7e09052b910d266c640a80428e60e7ecea0aca91e131d6b1129a" exitCode=0
Nov 21 15:59:06 crc kubenswrapper[4967]: I1121 15:59:06.835530 4967 generic.go:334] "Generic (PLEG): container finished" podID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerID="9128ac2fa213451d9486f8c34cb9a75e619c0341eb0fa0b5057dde6062413a33" exitCode=0
Nov 21 15:59:06 crc kubenswrapper[4967]: I1121 15:59:06.835719 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"4f74f871-6a82-49a3-a9e7-a991a513027b","Type":"ContainerDied","Data":"5c94c4de764d7e09052b910d266c640a80428e60e7ecea0aca91e131d6b1129a"}
Nov 21 15:59:06 crc kubenswrapper[4967]: I1121 15:59:06.835776 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"4f74f871-6a82-49a3-a9e7-a991a513027b","Type":"ContainerDied","Data":"9128ac2fa213451d9486f8c34cb9a75e619c0341eb0fa0b5057dde6062413a33"}
Nov 21 15:59:07 crc kubenswrapper[4967]: I1121 15:59:07.849527 4967 generic.go:334] "Generic (PLEG): container finished" podID="c2225c0b-f7f0-4d45-80cd-cde7456d6f15" containerID="70b9fd1a634edec4ed2d2f40eb9cad169a2b3356fada7646857b8e48488baa8c" exitCode=0
Nov 21 15:59:07 crc kubenswrapper[4967]: I1121 15:59:07.849617 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-f6s7x" event={"ID":"c2225c0b-f7f0-4d45-80cd-cde7456d6f15","Type":"ContainerDied","Data":"70b9fd1a634edec4ed2d2f40eb9cad169a2b3356fada7646857b8e48488baa8c"}
Nov 21 15:59:07 crc kubenswrapper[4967]: I1121 15:59:07.855912 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"268acb69-9196-47c9-8f92-b8b16d63a4a9","Type":"ContainerStarted","Data":"6305fbb4a10b9ac292d63c3e58d662c20257d7853977bdae9b878c4a0fcd297c"}
Nov 21 15:59:07 crc kubenswrapper[4967]: I1121 15:59:07.856068 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 21 15:59:07 crc kubenswrapper[4967]: I1121 15:59:07.858926 4967 generic.go:334] "Generic (PLEG): container finished" podID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerID="7e11cbb3057d6975e8df896d8fbe7291ea52ff0553f90dc377f996aa57edaa1d" exitCode=0
Nov 21 15:59:07 crc kubenswrapper[4967]: I1121 15:59:07.859231 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"4f74f871-6a82-49a3-a9e7-a991a513027b","Type":"ContainerDied","Data":"7e11cbb3057d6975e8df896d8fbe7291ea52ff0553f90dc377f996aa57edaa1d"}
Nov 21 15:59:07 crc kubenswrapper[4967]: I1121 15:59:07.903667 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.9108276760000003 podStartE2EDuration="21.903640716s" podCreationTimestamp="2025-11-21 15:58:46 +0000 UTC" firstStartedPulling="2025-11-21 15:58:48.48629448 +0000 UTC m=+1416.744815488" lastFinishedPulling="2025-11-21 15:59:06.47910752 +0000 UTC m=+1434.737628528" observedRunningTime="2025-11-21 15:59:07.891462838 +0000 UTC m=+1436.149983856" watchObservedRunningTime="2025-11-21 15:59:07.903640716 +0000 UTC m=+1436.162161734"
Nov 21 15:59:08 crc kubenswrapper[4967]: I1121 15:59:08.016155 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Nov 21 15:59:08 crc kubenswrapper[4967]: I1121 15:59:08.033042 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 21 15:59:08 crc kubenswrapper[4967]: I1121 15:59:08.349047 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 21 15:59:08 crc kubenswrapper[4967]: I1121 15:59:08.349117 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 21 15:59:08 crc kubenswrapper[4967]: I1121 15:59:08.360247 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv"
Nov 21 15:59:08 crc kubenswrapper[4967]: I1121 15:59:08.517396 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-w6tqb"]
Nov 21 15:59:08 crc kubenswrapper[4967]: I1121 15:59:08.517672 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" podUID="b316537c-ac90-4a7b-8cee-ed9cb7199f98" containerName="dnsmasq-dns" containerID="cri-o://2562c13354817d0fa6368e222c43df2c762ca08d466429c2d6e8273a55b89711" gracePeriod=10
Nov 21 15:59:08 crc kubenswrapper[4967]: I1121 15:59:08.909879 4967 generic.go:334] "Generic (PLEG): container finished" podID="b316537c-ac90-4a7b-8cee-ed9cb7199f98" containerID="2562c13354817d0fa6368e222c43df2c762ca08d466429c2d6e8273a55b89711" exitCode=0
Nov 21 15:59:08 crc kubenswrapper[4967]: I1121 15:59:08.910066 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" event={"ID":"b316537c-ac90-4a7b-8cee-ed9cb7199f98","Type":"ContainerDied","Data":"2562c13354817d0fa6368e222c43df2c762ca08d466429c2d6e8273a55b89711"}
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.355521 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb"
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.397492 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-ovsdbserver-nb\") pod \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") "
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.397599 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-ovsdbserver-sb\") pod \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") "
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.397722 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pt92c\" (UniqueName: \"kubernetes.io/projected/b316537c-ac90-4a7b-8cee-ed9cb7199f98-kube-api-access-pt92c\") pod \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") "
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.397852 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-dns-swift-storage-0\") pod \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") "
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.397983 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-dns-svc\") pod \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") "
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.398076 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-config\") pod \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\" (UID: \"b316537c-ac90-4a7b-8cee-ed9cb7199f98\") "
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.423794 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b316537c-ac90-4a7b-8cee-ed9cb7199f98-kube-api-access-pt92c" (OuterVolumeSpecName: "kube-api-access-pt92c") pod "b316537c-ac90-4a7b-8cee-ed9cb7199f98" (UID: "b316537c-ac90-4a7b-8cee-ed9cb7199f98"). InnerVolumeSpecName "kube-api-access-pt92c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.497578 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-config" (OuterVolumeSpecName: "config") pod "b316537c-ac90-4a7b-8cee-ed9cb7199f98" (UID: "b316537c-ac90-4a7b-8cee-ed9cb7199f98"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.500942 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pt92c\" (UniqueName: \"kubernetes.io/projected/b316537c-ac90-4a7b-8cee-ed9cb7199f98-kube-api-access-pt92c\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.500991 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-config\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.562019 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b316537c-ac90-4a7b-8cee-ed9cb7199f98" (UID: "b316537c-ac90-4a7b-8cee-ed9cb7199f98"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.572405 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-f6s7x"
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.575873 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b316537c-ac90-4a7b-8cee-ed9cb7199f98" (UID: "b316537c-ac90-4a7b-8cee-ed9cb7199f98"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.583301 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b316537c-ac90-4a7b-8cee-ed9cb7199f98" (UID: "b316537c-ac90-4a7b-8cee-ed9cb7199f98"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.604408 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-config-data\") pod \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\" (UID: \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\") "
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.604558 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-scripts\") pod \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\" (UID: \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\") "
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.604662 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-combined-ca-bundle\") pod \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\" (UID: \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\") "
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.604831 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ncrgj\" (UniqueName: \"kubernetes.io/projected/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-kube-api-access-ncrgj\") pod \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\" (UID: \"c2225c0b-f7f0-4d45-80cd-cde7456d6f15\") "
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.607603 4967 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.607639 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.607658 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.619191 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-scripts" (OuterVolumeSpecName: "scripts") pod "c2225c0b-f7f0-4d45-80cd-cde7456d6f15" (UID: "c2225c0b-f7f0-4d45-80cd-cde7456d6f15"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.626024 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-kube-api-access-ncrgj" (OuterVolumeSpecName: "kube-api-access-ncrgj") pod "c2225c0b-f7f0-4d45-80cd-cde7456d6f15" (UID: "c2225c0b-f7f0-4d45-80cd-cde7456d6f15"). InnerVolumeSpecName "kube-api-access-ncrgj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.647949 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b316537c-ac90-4a7b-8cee-ed9cb7199f98" (UID: "b316537c-ac90-4a7b-8cee-ed9cb7199f98"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.670918 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-config-data" (OuterVolumeSpecName: "config-data") pod "c2225c0b-f7f0-4d45-80cd-cde7456d6f15" (UID: "c2225c0b-f7f0-4d45-80cd-cde7456d6f15"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.705069 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c2225c0b-f7f0-4d45-80cd-cde7456d6f15" (UID: "c2225c0b-f7f0-4d45-80cd-cde7456d6f15"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.709419 4967 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b316537c-ac90-4a7b-8cee-ed9cb7199f98-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.709464 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-config-data\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.709478 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-scripts\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.709499 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.709515 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ncrgj\" (UniqueName: \"kubernetes.io/projected/c2225c0b-f7f0-4d45-80cd-cde7456d6f15-kube-api-access-ncrgj\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.936482 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb" event={"ID":"b316537c-ac90-4a7b-8cee-ed9cb7199f98","Type":"ContainerDied","Data":"fa541bcbdd262094577df0665e707ffae0e47a0c9c5cf804886cc4bb1569a18f"}
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.936550 4967 scope.go:117] "RemoveContainer" containerID="2562c13354817d0fa6368e222c43df2c762ca08d466429c2d6e8273a55b89711"
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.937658 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688b9f5b49-w6tqb"
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.938373 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-f6s7x" event={"ID":"c2225c0b-f7f0-4d45-80cd-cde7456d6f15","Type":"ContainerDied","Data":"89aba41a4dbda43733e78f0b0979188be257b31f05a1073505479e825019d956"}
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.938398 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="89aba41a4dbda43733e78f0b0979188be257b31f05a1073505479e825019d956"
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.938475 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-f6s7x"
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.972158 4967 scope.go:117] "RemoveContainer" containerID="7d967aa61e05a24ed613d07f32d8bb266e091a1c52c9e7b8d8ea1c63c26399de"
Nov 21 15:59:09 crc kubenswrapper[4967]: I1121 15:59:09.984935 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-w6tqb"]
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.001645 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-w6tqb"]
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.068368 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.068812 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="0d765a37-c8ac-4fc9-b550-5eaf97deaf09" containerName="nova-scheduler-scheduler" containerID="cri-o://38d9653dcba48edb17f450b3c3bd73ed3b5349eef0b565b018c83801434c50ae" gracePeriod=30
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.084147 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.084486 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678" containerName="nova-api-log" containerID="cri-o://db23485f8153cfae569a5d231b63bd47205627f4325b90bfc2d8e0f1217f4e2d" gracePeriod=30
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.084957 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678" containerName="nova-api-api" containerID="cri-o://7962296d7c2ea525500212d73986ab60567159284ca5a5de923b903b6c059fd5" gracePeriod=30
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.566730 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b316537c-ac90-4a7b-8cee-ed9cb7199f98" path="/var/lib/kubelet/pods/b316537c-ac90-4a7b-8cee-ed9cb7199f98/volumes"
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.569504 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kgfq8"
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.569534 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kgfq8"
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.610580 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kgfq8"
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.823703 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.837359 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nctxr\" (UniqueName: \"kubernetes.io/projected/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-kube-api-access-nctxr\") pod \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\" (UID: \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\") "
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.837432 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-combined-ca-bundle\") pod \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\" (UID: \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\") "
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.837669 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-logs\") pod \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\" (UID: \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\") "
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.837808 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-config-data\") pod \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\" (UID: \"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678\") "
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.837986 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-logs" (OuterVolumeSpecName: "logs") pod "0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678" (UID: "0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.838482 4967 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-logs\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.844892 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-kube-api-access-nctxr" (OuterVolumeSpecName: "kube-api-access-nctxr") pod "0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678" (UID: "0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678"). InnerVolumeSpecName "kube-api-access-nctxr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.884471 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-config-data" (OuterVolumeSpecName: "config-data") pod "0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678" (UID: "0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.888174 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678" (UID: "0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.941356 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-config-data\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.941398 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nctxr\" (UniqueName: \"kubernetes.io/projected/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-kube-api-access-nctxr\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.941411 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.957057 4967 generic.go:334] "Generic (PLEG): container finished" podID="0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678" containerID="7962296d7c2ea525500212d73986ab60567159284ca5a5de923b903b6c059fd5" exitCode=0
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.957089 4967 generic.go:334] "Generic (PLEG): container finished" podID="0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678" containerID="db23485f8153cfae569a5d231b63bd47205627f4325b90bfc2d8e0f1217f4e2d" exitCode=143
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.957106 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678","Type":"ContainerDied","Data":"7962296d7c2ea525500212d73986ab60567159284ca5a5de923b903b6c059fd5"}
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.957155 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.957176 4967 scope.go:117] "RemoveContainer" containerID="7962296d7c2ea525500212d73986ab60567159284ca5a5de923b903b6c059fd5"
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.957161 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678","Type":"ContainerDied","Data":"db23485f8153cfae569a5d231b63bd47205627f4325b90bfc2d8e0f1217f4e2d"}
Nov 21 15:59:10 crc kubenswrapper[4967]: I1121 15:59:10.957385 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678","Type":"ContainerDied","Data":"d5407c9825d14cac5cbb13dc9f61b45e7151115fa7cdf8e513dd0e67223c7cce"}
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.011373 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.014198 4967 scope.go:117] "RemoveContainer" containerID="db23485f8153cfae569a5d231b63bd47205627f4325b90bfc2d8e0f1217f4e2d"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.028099 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.045198 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 21 15:59:11 crc kubenswrapper[4967]: E1121 15:59:11.045721 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b316537c-ac90-4a7b-8cee-ed9cb7199f98" containerName="dnsmasq-dns"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.045739 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b316537c-ac90-4a7b-8cee-ed9cb7199f98" containerName="dnsmasq-dns"
Nov 21 15:59:11 crc kubenswrapper[4967]: E1121 15:59:11.045755 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678" containerName="nova-api-api"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.045761 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678" containerName="nova-api-api"
Nov 21 15:59:11 crc kubenswrapper[4967]: E1121 15:59:11.045779 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2225c0b-f7f0-4d45-80cd-cde7456d6f15" containerName="nova-manage"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.045784 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2225c0b-f7f0-4d45-80cd-cde7456d6f15" containerName="nova-manage"
Nov 21 15:59:11 crc kubenswrapper[4967]: E1121 15:59:11.045809 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678" containerName="nova-api-log"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.045814 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678" containerName="nova-api-log"
Nov 21 15:59:11 crc kubenswrapper[4967]: E1121 15:59:11.045831 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b316537c-ac90-4a7b-8cee-ed9cb7199f98" containerName="init"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.045837 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b316537c-ac90-4a7b-8cee-ed9cb7199f98" containerName="init"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.046067 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2225c0b-f7f0-4d45-80cd-cde7456d6f15" containerName="nova-manage"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.046082 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678" containerName="nova-api-api"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.046099 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="b316537c-ac90-4a7b-8cee-ed9cb7199f98" containerName="dnsmasq-dns"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.046107 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678" containerName="nova-api-log"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.049096 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kgfq8"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.056845 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.061463 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.072864 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.103170 4967 scope.go:117] "RemoveContainer" containerID="7962296d7c2ea525500212d73986ab60567159284ca5a5de923b903b6c059fd5"
Nov 21 15:59:11 crc kubenswrapper[4967]: E1121 15:59:11.103883 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7962296d7c2ea525500212d73986ab60567159284ca5a5de923b903b6c059fd5\": container with ID starting with 7962296d7c2ea525500212d73986ab60567159284ca5a5de923b903b6c059fd5 not found: ID does not exist" containerID="7962296d7c2ea525500212d73986ab60567159284ca5a5de923b903b6c059fd5"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.103917 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7962296d7c2ea525500212d73986ab60567159284ca5a5de923b903b6c059fd5"} err="failed to get container status \"7962296d7c2ea525500212d73986ab60567159284ca5a5de923b903b6c059fd5\": rpc error: code = NotFound desc = could not find container \"7962296d7c2ea525500212d73986ab60567159284ca5a5de923b903b6c059fd5\": container with ID starting with 7962296d7c2ea525500212d73986ab60567159284ca5a5de923b903b6c059fd5 not found: ID does not exist"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.104076 4967 scope.go:117] "RemoveContainer" containerID="db23485f8153cfae569a5d231b63bd47205627f4325b90bfc2d8e0f1217f4e2d"
Nov 21 15:59:11 crc kubenswrapper[4967]: E1121 15:59:11.104683 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db23485f8153cfae569a5d231b63bd47205627f4325b90bfc2d8e0f1217f4e2d\": container with ID starting with db23485f8153cfae569a5d231b63bd47205627f4325b90bfc2d8e0f1217f4e2d not found: ID does not exist" containerID="db23485f8153cfae569a5d231b63bd47205627f4325b90bfc2d8e0f1217f4e2d"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.104718 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db23485f8153cfae569a5d231b63bd47205627f4325b90bfc2d8e0f1217f4e2d"} err="failed to get container status \"db23485f8153cfae569a5d231b63bd47205627f4325b90bfc2d8e0f1217f4e2d\": rpc error: code = NotFound desc = could not find container \"db23485f8153cfae569a5d231b63bd47205627f4325b90bfc2d8e0f1217f4e2d\": container with ID starting with db23485f8153cfae569a5d231b63bd47205627f4325b90bfc2d8e0f1217f4e2d not found: ID does not exist"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.104743 4967 scope.go:117] "RemoveContainer" containerID="7962296d7c2ea525500212d73986ab60567159284ca5a5de923b903b6c059fd5"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.105020 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7962296d7c2ea525500212d73986ab60567159284ca5a5de923b903b6c059fd5"} err="failed to get container status \"7962296d7c2ea525500212d73986ab60567159284ca5a5de923b903b6c059fd5\": rpc error: code = NotFound desc = could not find container \"7962296d7c2ea525500212d73986ab60567159284ca5a5de923b903b6c059fd5\": container with ID starting with 7962296d7c2ea525500212d73986ab60567159284ca5a5de923b903b6c059fd5 not found: ID does not exist"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.105102 4967 scope.go:117] "RemoveContainer" containerID="db23485f8153cfae569a5d231b63bd47205627f4325b90bfc2d8e0f1217f4e2d"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.105374 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db23485f8153cfae569a5d231b63bd47205627f4325b90bfc2d8e0f1217f4e2d"} err="failed to get container status \"db23485f8153cfae569a5d231b63bd47205627f4325b90bfc2d8e0f1217f4e2d\": rpc error: code = NotFound desc = could not find container \"db23485f8153cfae569a5d231b63bd47205627f4325b90bfc2d8e0f1217f4e2d\": container with ID starting with db23485f8153cfae569a5d231b63bd47205627f4325b90bfc2d8e0f1217f4e2d not found: ID does not exist"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.152559 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kgfq8"]
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.155726 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\") " pod="openstack/nova-api-0"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.155802 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-logs\") pod \"nova-api-0\" (UID: \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\") " pod="openstack/nova-api-0"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.155827 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-config-data\") pod \"nova-api-0\" (UID: \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\") " pod="openstack/nova-api-0"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.155923 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dwzz\" (UniqueName: \"kubernetes.io/projected/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-kube-api-access-9dwzz\") pod \"nova-api-0\" (UID: \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\") " pod="openstack/nova-api-0"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.258466 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dwzz\" (UniqueName: \"kubernetes.io/projected/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-kube-api-access-9dwzz\") pod \"nova-api-0\" (UID: \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\") " pod="openstack/nova-api-0"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.258948 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\") " pod="openstack/nova-api-0"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.259114 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-logs\") pod \"nova-api-0\" (UID: \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\") " pod="openstack/nova-api-0"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.259266 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-config-data\") pod \"nova-api-0\" (UID: \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\") " pod="openstack/nova-api-0"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.259616 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-logs\") pod \"nova-api-0\" (UID: \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\") " pod="openstack/nova-api-0"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.266963 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-config-data\") pod \"nova-api-0\" (UID: \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\") " pod="openstack/nova-api-0"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.268035 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\") " pod="openstack/nova-api-0"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.290038 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dwzz\" (UniqueName: \"kubernetes.io/projected/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-kube-api-access-9dwzz\") pod \"nova-api-0\" (UID: \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\") " pod="openstack/nova-api-0"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.393971 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.704178 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.775854 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d765a37-c8ac-4fc9-b550-5eaf97deaf09-config-data\") pod \"0d765a37-c8ac-4fc9-b550-5eaf97deaf09\" (UID: \"0d765a37-c8ac-4fc9-b550-5eaf97deaf09\") "
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.776142 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d765a37-c8ac-4fc9-b550-5eaf97deaf09-combined-ca-bundle\") pod \"0d765a37-c8ac-4fc9-b550-5eaf97deaf09\" (UID: \"0d765a37-c8ac-4fc9-b550-5eaf97deaf09\") "
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.776235 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rv4c4\" (UniqueName: \"kubernetes.io/projected/0d765a37-c8ac-4fc9-b550-5eaf97deaf09-kube-api-access-rv4c4\") pod \"0d765a37-c8ac-4fc9-b550-5eaf97deaf09\" (UID: \"0d765a37-c8ac-4fc9-b550-5eaf97deaf09\") "
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.782904 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d765a37-c8ac-4fc9-b550-5eaf97deaf09-kube-api-access-rv4c4" (OuterVolumeSpecName: "kube-api-access-rv4c4") pod "0d765a37-c8ac-4fc9-b550-5eaf97deaf09" (UID: "0d765a37-c8ac-4fc9-b550-5eaf97deaf09"). InnerVolumeSpecName "kube-api-access-rv4c4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.821869 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d765a37-c8ac-4fc9-b550-5eaf97deaf09-config-data" (OuterVolumeSpecName: "config-data") pod "0d765a37-c8ac-4fc9-b550-5eaf97deaf09" (UID: "0d765a37-c8ac-4fc9-b550-5eaf97deaf09"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.824297 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d765a37-c8ac-4fc9-b550-5eaf97deaf09-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0d765a37-c8ac-4fc9-b550-5eaf97deaf09" (UID: "0d765a37-c8ac-4fc9-b550-5eaf97deaf09"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.882914 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d765a37-c8ac-4fc9-b550-5eaf97deaf09-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.882964 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rv4c4\" (UniqueName: \"kubernetes.io/projected/0d765a37-c8ac-4fc9-b550-5eaf97deaf09-kube-api-access-rv4c4\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.882981 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d765a37-c8ac-4fc9-b550-5eaf97deaf09-config-data\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.935791 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 21 15:59:11 crc kubenswrapper[4967]: W1121 15:59:11.937155 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod57790c9e_cc4b_41f4_aaa5_8c8b0ee09288.slice/crio-2437e6f36c401b40dbdbd24188c024e027e88eb250237a9683d847d1bebe40cf WatchSource:0}: Error finding container 2437e6f36c401b40dbdbd24188c024e027e88eb250237a9683d847d1bebe40cf: Status 404 returned error can't find the container with id 2437e6f36c401b40dbdbd24188c024e027e88eb250237a9683d847d1bebe40cf
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.987292 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288","Type":"ContainerStarted","Data":"2437e6f36c401b40dbdbd24188c024e027e88eb250237a9683d847d1bebe40cf"}
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.994628 4967 generic.go:334] "Generic (PLEG): container finished" podID="0d765a37-c8ac-4fc9-b550-5eaf97deaf09" containerID="38d9653dcba48edb17f450b3c3bd73ed3b5349eef0b565b018c83801434c50ae" exitCode=0
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.994688 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0d765a37-c8ac-4fc9-b550-5eaf97deaf09","Type":"ContainerDied","Data":"38d9653dcba48edb17f450b3c3bd73ed3b5349eef0b565b018c83801434c50ae"}
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.994785 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0d765a37-c8ac-4fc9-b550-5eaf97deaf09","Type":"ContainerDied","Data":"e049475e7ce5bcf84033e0e641c0dd3a2055d2a5acd982e69abb985bc6500c20"}
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.994806 4967 scope.go:117] "RemoveContainer" containerID="38d9653dcba48edb17f450b3c3bd73ed3b5349eef0b565b018c83801434c50ae"
Nov 21 15:59:11 crc kubenswrapper[4967]: I1121 15:59:11.995189 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.036864 4967 scope.go:117] "RemoveContainer" containerID="38d9653dcba48edb17f450b3c3bd73ed3b5349eef0b565b018c83801434c50ae"
Nov 21 15:59:12 crc kubenswrapper[4967]: E1121 15:59:12.037565 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38d9653dcba48edb17f450b3c3bd73ed3b5349eef0b565b018c83801434c50ae\": container with ID starting with 38d9653dcba48edb17f450b3c3bd73ed3b5349eef0b565b018c83801434c50ae not found: ID does not exist" containerID="38d9653dcba48edb17f450b3c3bd73ed3b5349eef0b565b018c83801434c50ae"
Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.037624 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38d9653dcba48edb17f450b3c3bd73ed3b5349eef0b565b018c83801434c50ae"} err="failed to get container status \"38d9653dcba48edb17f450b3c3bd73ed3b5349eef0b565b018c83801434c50ae\": rpc error: code = NotFound desc = could not find container \"38d9653dcba48edb17f450b3c3bd73ed3b5349eef0b565b018c83801434c50ae\": container with ID starting with 38d9653dcba48edb17f450b3c3bd73ed3b5349eef0b565b018c83801434c50ae not found: ID does not exist"
Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.098766 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.113962 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.130303 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 21 15:59:12 crc kubenswrapper[4967]: E1121 15:59:12.130980 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d765a37-c8ac-4fc9-b550-5eaf97deaf09" containerName="nova-scheduler-scheduler"
Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.131000 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d765a37-c8ac-4fc9-b550-5eaf97deaf09" containerName="nova-scheduler-scheduler"
Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.131283 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d765a37-c8ac-4fc9-b550-5eaf97deaf09" containerName="nova-scheduler-scheduler"
Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.132272 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.136292 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.143528 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.190055 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55709a17-4e2d-462e-ade7-ffde9eff2488-config-data\") pod \"nova-scheduler-0\" (UID: \"55709a17-4e2d-462e-ade7-ffde9eff2488\") " pod="openstack/nova-scheduler-0"
Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.190100 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55709a17-4e2d-462e-ade7-ffde9eff2488-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"55709a17-4e2d-462e-ade7-ffde9eff2488\") " pod="openstack/nova-scheduler-0"
Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.190236 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5k4jz\" (UniqueName: \"kubernetes.io/projected/55709a17-4e2d-462e-ade7-ffde9eff2488-kube-api-access-5k4jz\") pod \"nova-scheduler-0\" (UID: \"55709a17-4e2d-462e-ade7-ffde9eff2488\") " pod="openstack/nova-scheduler-0"
Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.292065 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55709a17-4e2d-462e-ade7-ffde9eff2488-config-data\") pod \"nova-scheduler-0\" (UID: \"55709a17-4e2d-462e-ade7-ffde9eff2488\") " pod="openstack/nova-scheduler-0"
Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.292123 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55709a17-4e2d-462e-ade7-ffde9eff2488-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"55709a17-4e2d-462e-ade7-ffde9eff2488\") " pod="openstack/nova-scheduler-0"
Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.292220 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5k4jz\" (UniqueName: \"kubernetes.io/projected/55709a17-4e2d-462e-ade7-ffde9eff2488-kube-api-access-5k4jz\") pod \"nova-scheduler-0\" (UID: \"55709a17-4e2d-462e-ade7-ffde9eff2488\") " pod="openstack/nova-scheduler-0"
Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.296818 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55709a17-4e2d-462e-ade7-ffde9eff2488-config-data\") pod \"nova-scheduler-0\" (UID: \"55709a17-4e2d-462e-ade7-ffde9eff2488\") " pod="openstack/nova-scheduler-0"
Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.297336 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55709a17-4e2d-462e-ade7-ffde9eff2488-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"55709a17-4e2d-462e-ade7-ffde9eff2488\") " pod="openstack/nova-scheduler-0"
Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.309920 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5k4jz\" (UniqueName:
\"kubernetes.io/projected/55709a17-4e2d-462e-ade7-ffde9eff2488-kube-api-access-5k4jz\") pod \"nova-scheduler-0\" (UID: \"55709a17-4e2d-462e-ade7-ffde9eff2488\") " pod="openstack/nova-scheduler-0" Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.455235 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.555530 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678" path="/var/lib/kubelet/pods/0b3cab8b-d91c-44b1-8d42-0f0c1aa6a678/volumes" Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.556779 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d765a37-c8ac-4fc9-b550-5eaf97deaf09" path="/var/lib/kubelet/pods/0d765a37-c8ac-4fc9-b550-5eaf97deaf09/volumes" Nov 21 15:59:12 crc kubenswrapper[4967]: I1121 15:59:12.925988 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 15:59:12 crc kubenswrapper[4967]: W1121 15:59:12.926891 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55709a17_4e2d_462e_ade7_ffde9eff2488.slice/crio-56c2e89516307ad64416f72cbe8733e885de2a330fae80bda50322a86ea3aac6 WatchSource:0}: Error finding container 56c2e89516307ad64416f72cbe8733e885de2a330fae80bda50322a86ea3aac6: Status 404 returned error can't find the container with id 56c2e89516307ad64416f72cbe8733e885de2a330fae80bda50322a86ea3aac6 Nov 21 15:59:13 crc kubenswrapper[4967]: I1121 15:59:13.008807 4967 generic.go:334] "Generic (PLEG): container finished" podID="827df8c5-068d-48b6-af4d-f971bdacdcb3" containerID="836e3046d9e0e13c969f1e742e556cd8f9ecd3056d0193761cfce2ef1a63c2a9" exitCode=0 Nov 21 15:59:13 crc kubenswrapper[4967]: I1121 15:59:13.008853 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-z9vxs" event={"ID":"827df8c5-068d-48b6-af4d-f971bdacdcb3","Type":"ContainerDied","Data":"836e3046d9e0e13c969f1e742e556cd8f9ecd3056d0193761cfce2ef1a63c2a9"} Nov 21 15:59:13 crc kubenswrapper[4967]: I1121 15:59:13.014525 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288","Type":"ContainerStarted","Data":"8457305cb1421c4fd494565ba87a1cc04a023299873fb13766968f4253d44fc6"} Nov 21 15:59:13 crc kubenswrapper[4967]: I1121 15:59:13.014563 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288","Type":"ContainerStarted","Data":"e5c6f6432b4d0de7a38e811ceea976a8a6b1812032e792816b4364a82331569e"} Nov 21 15:59:13 crc kubenswrapper[4967]: I1121 15:59:13.017290 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"55709a17-4e2d-462e-ade7-ffde9eff2488","Type":"ContainerStarted","Data":"56c2e89516307ad64416f72cbe8733e885de2a330fae80bda50322a86ea3aac6"} Nov 21 15:59:13 crc kubenswrapper[4967]: I1121 15:59:13.017386 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kgfq8" podUID="e53b7b96-ffc9-40af-b2c4-9c6b835f570d" containerName="registry-server" containerID="cri-o://f21b68ef9551802a6d16b75cd747760a580dd43a72ef36de94c03c3d8717ffc2" gracePeriod=2 Nov 21 15:59:13 crc kubenswrapper[4967]: I1121 15:59:13.053872 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/nova-api-0" podStartSLOduration=3.053846329 podStartE2EDuration="3.053846329s" podCreationTimestamp="2025-11-21 15:59:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:59:13.045671139 +0000 UTC m=+1441.304192147" watchObservedRunningTime="2025-11-21 15:59:13.053846329 +0000 UTC m=+1441.312367347" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:13.494238 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kgfq8" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:13.523287 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctcsd\" (UniqueName: \"kubernetes.io/projected/e53b7b96-ffc9-40af-b2c4-9c6b835f570d-kube-api-access-ctcsd\") pod \"e53b7b96-ffc9-40af-b2c4-9c6b835f570d\" (UID: \"e53b7b96-ffc9-40af-b2c4-9c6b835f570d\") " Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:13.524216 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e53b7b96-ffc9-40af-b2c4-9c6b835f570d-catalog-content\") pod \"e53b7b96-ffc9-40af-b2c4-9c6b835f570d\" (UID: \"e53b7b96-ffc9-40af-b2c4-9c6b835f570d\") " Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:13.526190 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e53b7b96-ffc9-40af-b2c4-9c6b835f570d-utilities\") pod \"e53b7b96-ffc9-40af-b2c4-9c6b835f570d\" (UID: \"e53b7b96-ffc9-40af-b2c4-9c6b835f570d\") " Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:13.529176 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e53b7b96-ffc9-40af-b2c4-9c6b835f570d-kube-api-access-ctcsd" (OuterVolumeSpecName: "kube-api-access-ctcsd") pod "e53b7b96-ffc9-40af-b2c4-9c6b835f570d" (UID: "e53b7b96-ffc9-40af-b2c4-9c6b835f570d"). InnerVolumeSpecName "kube-api-access-ctcsd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:13.529523 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e53b7b96-ffc9-40af-b2c4-9c6b835f570d-utilities" (OuterVolumeSpecName: "utilities") pod "e53b7b96-ffc9-40af-b2c4-9c6b835f570d" (UID: "e53b7b96-ffc9-40af-b2c4-9c6b835f570d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:13.531938 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e53b7b96-ffc9-40af-b2c4-9c6b835f570d-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:13.531965 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctcsd\" (UniqueName: \"kubernetes.io/projected/e53b7b96-ffc9-40af-b2c4-9c6b835f570d-kube-api-access-ctcsd\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:13.574250 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e53b7b96-ffc9-40af-b2c4-9c6b835f570d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e53b7b96-ffc9-40af-b2c4-9c6b835f570d" (UID: "e53b7b96-ffc9-40af-b2c4-9c6b835f570d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:13.634179 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e53b7b96-ffc9-40af-b2c4-9c6b835f570d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:13.970799 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2bxgl" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.019632 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2bxgl" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.033100 4967 generic.go:334] "Generic (PLEG): container finished" podID="e53b7b96-ffc9-40af-b2c4-9c6b835f570d" containerID="f21b68ef9551802a6d16b75cd747760a580dd43a72ef36de94c03c3d8717ffc2" exitCode=0 Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.033178 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kgfq8" event={"ID":"e53b7b96-ffc9-40af-b2c4-9c6b835f570d","Type":"ContainerDied","Data":"f21b68ef9551802a6d16b75cd747760a580dd43a72ef36de94c03c3d8717ffc2"} Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.033211 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kgfq8" event={"ID":"e53b7b96-ffc9-40af-b2c4-9c6b835f570d","Type":"ContainerDied","Data":"683938c72621959684e5d00dc647ab581b546864dee7b2c5409c44c554a1a664"} Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.033204 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kgfq8" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.033240 4967 scope.go:117] "RemoveContainer" containerID="f21b68ef9551802a6d16b75cd747760a580dd43a72ef36de94c03c3d8717ffc2" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.035456 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"55709a17-4e2d-462e-ade7-ffde9eff2488","Type":"ContainerStarted","Data":"21bbeb1ea79bc22850fc3150ea885b9de88d299389094dedd13acede98665482"} Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.059409 4967 scope.go:117] "RemoveContainer" containerID="76566519092bbe06487dbd01a919c80ccf7c2170b6f994a61ac514ae5b87adb6" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.068387 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.068371562 podStartE2EDuration="2.068371562s" podCreationTimestamp="2025-11-21 15:59:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:59:14.065959081 +0000 UTC m=+1442.324480079" watchObservedRunningTime="2025-11-21 15:59:14.068371562 +0000 UTC m=+1442.326892570" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.095542 4967 scope.go:117] "RemoveContainer" containerID="7a9a334eddb8f99bf372b76ec9ef64ee184996973632148f2adefb63a1ba4013" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.098257 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kgfq8"] Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.110411 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kgfq8"] Nov 21 
15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.144074 4967 scope.go:117] "RemoveContainer" containerID="f21b68ef9551802a6d16b75cd747760a580dd43a72ef36de94c03c3d8717ffc2" Nov 21 15:59:15 crc kubenswrapper[4967]: E1121 15:59:14.144749 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f21b68ef9551802a6d16b75cd747760a580dd43a72ef36de94c03c3d8717ffc2\": container with ID starting with f21b68ef9551802a6d16b75cd747760a580dd43a72ef36de94c03c3d8717ffc2 not found: ID does not exist" containerID="f21b68ef9551802a6d16b75cd747760a580dd43a72ef36de94c03c3d8717ffc2" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.144798 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f21b68ef9551802a6d16b75cd747760a580dd43a72ef36de94c03c3d8717ffc2"} err="failed to get container status \"f21b68ef9551802a6d16b75cd747760a580dd43a72ef36de94c03c3d8717ffc2\": rpc error: code = NotFound desc = could not find container \"f21b68ef9551802a6d16b75cd747760a580dd43a72ef36de94c03c3d8717ffc2\": container with ID starting with f21b68ef9551802a6d16b75cd747760a580dd43a72ef36de94c03c3d8717ffc2 not found: ID does not exist" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.144830 4967 scope.go:117] "RemoveContainer" containerID="76566519092bbe06487dbd01a919c80ccf7c2170b6f994a61ac514ae5b87adb6" Nov 21 15:59:15 crc kubenswrapper[4967]: E1121 15:59:14.145211 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76566519092bbe06487dbd01a919c80ccf7c2170b6f994a61ac514ae5b87adb6\": container with ID starting with 76566519092bbe06487dbd01a919c80ccf7c2170b6f994a61ac514ae5b87adb6 not found: ID does not exist" containerID="76566519092bbe06487dbd01a919c80ccf7c2170b6f994a61ac514ae5b87adb6" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.145239 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76566519092bbe06487dbd01a919c80ccf7c2170b6f994a61ac514ae5b87adb6"} err="failed to get container status \"76566519092bbe06487dbd01a919c80ccf7c2170b6f994a61ac514ae5b87adb6\": rpc error: code = NotFound desc = could not find container \"76566519092bbe06487dbd01a919c80ccf7c2170b6f994a61ac514ae5b87adb6\": container with ID starting with 76566519092bbe06487dbd01a919c80ccf7c2170b6f994a61ac514ae5b87adb6 not found: ID does not exist" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.145260 4967 scope.go:117] "RemoveContainer" containerID="7a9a334eddb8f99bf372b76ec9ef64ee184996973632148f2adefb63a1ba4013" Nov 21 15:59:15 crc kubenswrapper[4967]: E1121 15:59:14.145554 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a9a334eddb8f99bf372b76ec9ef64ee184996973632148f2adefb63a1ba4013\": container with ID starting with 7a9a334eddb8f99bf372b76ec9ef64ee184996973632148f2adefb63a1ba4013 not found: ID does not exist" containerID="7a9a334eddb8f99bf372b76ec9ef64ee184996973632148f2adefb63a1ba4013" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.145597 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a9a334eddb8f99bf372b76ec9ef64ee184996973632148f2adefb63a1ba4013"} err="failed to get container status \"7a9a334eddb8f99bf372b76ec9ef64ee184996973632148f2adefb63a1ba4013\": rpc error: code = NotFound desc = could not find container 
\"7a9a334eddb8f99bf372b76ec9ef64ee184996973632148f2adefb63a1ba4013\": container with ID starting with 7a9a334eddb8f99bf372b76ec9ef64ee184996973632148f2adefb63a1ba4013 not found: ID does not exist" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.489484 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-z9vxs" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.553219 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e53b7b96-ffc9-40af-b2c4-9c6b835f570d" path="/var/lib/kubelet/pods/e53b7b96-ffc9-40af-b2c4-9c6b835f570d/volumes" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.558112 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xfhxn\" (UniqueName: \"kubernetes.io/projected/827df8c5-068d-48b6-af4d-f971bdacdcb3-kube-api-access-xfhxn\") pod \"827df8c5-068d-48b6-af4d-f971bdacdcb3\" (UID: \"827df8c5-068d-48b6-af4d-f971bdacdcb3\") " Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.558440 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/827df8c5-068d-48b6-af4d-f971bdacdcb3-scripts\") pod \"827df8c5-068d-48b6-af4d-f971bdacdcb3\" (UID: \"827df8c5-068d-48b6-af4d-f971bdacdcb3\") " Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.558503 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/827df8c5-068d-48b6-af4d-f971bdacdcb3-config-data\") pod \"827df8c5-068d-48b6-af4d-f971bdacdcb3\" (UID: \"827df8c5-068d-48b6-af4d-f971bdacdcb3\") " Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.558601 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/827df8c5-068d-48b6-af4d-f971bdacdcb3-combined-ca-bundle\") pod \"827df8c5-068d-48b6-af4d-f971bdacdcb3\" (UID: \"827df8c5-068d-48b6-af4d-f971bdacdcb3\") " Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.564349 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/827df8c5-068d-48b6-af4d-f971bdacdcb3-scripts" (OuterVolumeSpecName: "scripts") pod "827df8c5-068d-48b6-af4d-f971bdacdcb3" (UID: "827df8c5-068d-48b6-af4d-f971bdacdcb3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.564577 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/827df8c5-068d-48b6-af4d-f971bdacdcb3-kube-api-access-xfhxn" (OuterVolumeSpecName: "kube-api-access-xfhxn") pod "827df8c5-068d-48b6-af4d-f971bdacdcb3" (UID: "827df8c5-068d-48b6-af4d-f971bdacdcb3"). InnerVolumeSpecName "kube-api-access-xfhxn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.592216 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/827df8c5-068d-48b6-af4d-f971bdacdcb3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "827df8c5-068d-48b6-af4d-f971bdacdcb3" (UID: "827df8c5-068d-48b6-af4d-f971bdacdcb3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.594402 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/827df8c5-068d-48b6-af4d-f971bdacdcb3-config-data" (OuterVolumeSpecName: "config-data") pod "827df8c5-068d-48b6-af4d-f971bdacdcb3" (UID: "827df8c5-068d-48b6-af4d-f971bdacdcb3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.662104 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/827df8c5-068d-48b6-af4d-f971bdacdcb3-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.662142 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/827df8c5-068d-48b6-af4d-f971bdacdcb3-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.662152 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/827df8c5-068d-48b6-af4d-f971bdacdcb3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:14.662164 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xfhxn\" (UniqueName: \"kubernetes.io/projected/827df8c5-068d-48b6-af4d-f971bdacdcb3-kube-api-access-xfhxn\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.048620 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-z9vxs" event={"ID":"827df8c5-068d-48b6-af4d-f971bdacdcb3","Type":"ContainerDied","Data":"04e9fc1f2936d76821a242f5c1da2360688dd62b1c803fdd1b7b3aaaf62c4b4d"} Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.048661 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="04e9fc1f2936d76821a242f5c1da2360688dd62b1c803fdd1b7b3aaaf62c4b4d" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.048733 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-z9vxs" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.123170 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 21 15:59:15 crc kubenswrapper[4967]: E1121 15:59:15.123774 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e53b7b96-ffc9-40af-b2c4-9c6b835f570d" containerName="extract-content" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.123793 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="e53b7b96-ffc9-40af-b2c4-9c6b835f570d" containerName="extract-content" Nov 21 15:59:15 crc kubenswrapper[4967]: E1121 15:59:15.123815 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="827df8c5-068d-48b6-af4d-f971bdacdcb3" containerName="nova-cell1-conductor-db-sync" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.123823 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="827df8c5-068d-48b6-af4d-f971bdacdcb3" containerName="nova-cell1-conductor-db-sync" Nov 21 15:59:15 crc kubenswrapper[4967]: E1121 15:59:15.123835 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e53b7b96-ffc9-40af-b2c4-9c6b835f570d" containerName="extract-utilities" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.123842 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="e53b7b96-ffc9-40af-b2c4-9c6b835f570d" containerName="extract-utilities" Nov 21 15:59:15 crc kubenswrapper[4967]: E1121 15:59:15.123875 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e53b7b96-ffc9-40af-b2c4-9c6b835f570d" containerName="registry-server" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.123883 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="e53b7b96-ffc9-40af-b2c4-9c6b835f570d" containerName="registry-server" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.124118 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="827df8c5-068d-48b6-af4d-f971bdacdcb3" containerName="nova-cell1-conductor-db-sync" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.124129 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="e53b7b96-ffc9-40af-b2c4-9c6b835f570d" containerName="registry-server" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.124987 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.129940 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.136668 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.171208 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ct2t\" (UniqueName: \"kubernetes.io/projected/4d004044-caa6-4813-9747-e18ca2f2ba9d-kube-api-access-4ct2t\") pod \"nova-cell1-conductor-0\" (UID: \"4d004044-caa6-4813-9747-e18ca2f2ba9d\") " pod="openstack/nova-cell1-conductor-0" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.171527 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d004044-caa6-4813-9747-e18ca2f2ba9d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"4d004044-caa6-4813-9747-e18ca2f2ba9d\") " pod="openstack/nova-cell1-conductor-0" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.171686 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d004044-caa6-4813-9747-e18ca2f2ba9d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"4d004044-caa6-4813-9747-e18ca2f2ba9d\") " pod="openstack/nova-cell1-conductor-0" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.274215 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ct2t\" (UniqueName: \"kubernetes.io/projected/4d004044-caa6-4813-9747-e18ca2f2ba9d-kube-api-access-4ct2t\") pod \"nova-cell1-conductor-0\" (UID: \"4d004044-caa6-4813-9747-e18ca2f2ba9d\") " pod="openstack/nova-cell1-conductor-0" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.274448 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d004044-caa6-4813-9747-e18ca2f2ba9d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"4d004044-caa6-4813-9747-e18ca2f2ba9d\") " pod="openstack/nova-cell1-conductor-0" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.274507 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d004044-caa6-4813-9747-e18ca2f2ba9d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"4d004044-caa6-4813-9747-e18ca2f2ba9d\") " pod="openstack/nova-cell1-conductor-0" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.279030 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d004044-caa6-4813-9747-e18ca2f2ba9d-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"4d004044-caa6-4813-9747-e18ca2f2ba9d\") " pod="openstack/nova-cell1-conductor-0" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.282154 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d004044-caa6-4813-9747-e18ca2f2ba9d-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"4d004044-caa6-4813-9747-e18ca2f2ba9d\") " pod="openstack/nova-cell1-conductor-0" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.292169 4967 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ct2t\" (UniqueName: \"kubernetes.io/projected/4d004044-caa6-4813-9747-e18ca2f2ba9d-kube-api-access-4ct2t\") pod \"nova-cell1-conductor-0\" (UID: \"4d004044-caa6-4813-9747-e18ca2f2ba9d\") " pod="openstack/nova-cell1-conductor-0" Nov 21 15:59:15 crc kubenswrapper[4967]: I1121 15:59:15.451121 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 21 15:59:16 crc kubenswrapper[4967]: W1121 15:59:16.058292 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4d004044_caa6_4813_9747_e18ca2f2ba9d.slice/crio-08048ea7c23e242d29b459c2d8b21a0dc16a2dd637e15d1aeac6b2f17de7eae4 WatchSource:0}: Error finding container 08048ea7c23e242d29b459c2d8b21a0dc16a2dd637e15d1aeac6b2f17de7eae4: Status 404 returned error can't find the container with id 08048ea7c23e242d29b459c2d8b21a0dc16a2dd637e15d1aeac6b2f17de7eae4 Nov 21 15:59:16 crc kubenswrapper[4967]: I1121 15:59:16.060350 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 21 15:59:16 crc kubenswrapper[4967]: I1121 15:59:16.256366 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2bxgl"] Nov 21 15:59:16 crc kubenswrapper[4967]: I1121 15:59:16.256619 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2bxgl" podUID="e088bef1-d23d-47a1-b266-6493f8fe0507" containerName="registry-server" containerID="cri-o://680cad6b36778fb418bf89136ac7828862421c211a05cd5f5f442cea202fb7bb" gracePeriod=2 Nov 21 15:59:16 crc kubenswrapper[4967]: I1121 15:59:16.411136 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fzqdr" podUID="d18f04e1-396e-462a-aa4c-c9caeb4523ed" containerName="registry-server" probeResult="failure" output=< Nov 21 15:59:16 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 15:59:16 crc kubenswrapper[4967]: > Nov 21 15:59:17 crc kubenswrapper[4967]: I1121 15:59:17.080913 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"4d004044-caa6-4813-9747-e18ca2f2ba9d","Type":"ContainerStarted","Data":"c3a85b0bd481b8fbd57b64c71acb754704e1bf492094ac3664047a103b31f4ad"} Nov 21 15:59:17 crc kubenswrapper[4967]: I1121 15:59:17.080959 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"4d004044-caa6-4813-9747-e18ca2f2ba9d","Type":"ContainerStarted","Data":"08048ea7c23e242d29b459c2d8b21a0dc16a2dd637e15d1aeac6b2f17de7eae4"} Nov 21 15:59:17 crc kubenswrapper[4967]: I1121 15:59:17.081037 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 21 15:59:17 crc kubenswrapper[4967]: I1121 15:59:17.084231 4967 generic.go:334] "Generic (PLEG): container finished" podID="e088bef1-d23d-47a1-b266-6493f8fe0507" containerID="680cad6b36778fb418bf89136ac7828862421c211a05cd5f5f442cea202fb7bb" exitCode=0 Nov 21 15:59:17 crc kubenswrapper[4967]: I1121 15:59:17.084272 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2bxgl" event={"ID":"e088bef1-d23d-47a1-b266-6493f8fe0507","Type":"ContainerDied","Data":"680cad6b36778fb418bf89136ac7828862421c211a05cd5f5f442cea202fb7bb"} Nov 21 15:59:17 crc 
kubenswrapper[4967]: I1121 15:59:17.099296 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.099279098 podStartE2EDuration="2.099279098s" podCreationTimestamp="2025-11-21 15:59:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:59:17.097546847 +0000 UTC m=+1445.356067875" watchObservedRunningTime="2025-11-21 15:59:17.099279098 +0000 UTC m=+1445.357800106" Nov 21 15:59:17 crc kubenswrapper[4967]: I1121 15:59:17.382779 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 21 15:59:17 crc kubenswrapper[4967]: I1121 15:59:17.396721 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2bxgl" Nov 21 15:59:17 crc kubenswrapper[4967]: I1121 15:59:17.446286 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqxkk\" (UniqueName: \"kubernetes.io/projected/e088bef1-d23d-47a1-b266-6493f8fe0507-kube-api-access-mqxkk\") pod \"e088bef1-d23d-47a1-b266-6493f8fe0507\" (UID: \"e088bef1-d23d-47a1-b266-6493f8fe0507\") " Nov 21 15:59:17 crc kubenswrapper[4967]: I1121 15:59:17.446451 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e088bef1-d23d-47a1-b266-6493f8fe0507-catalog-content\") pod \"e088bef1-d23d-47a1-b266-6493f8fe0507\" (UID: \"e088bef1-d23d-47a1-b266-6493f8fe0507\") " Nov 21 15:59:17 crc kubenswrapper[4967]: I1121 15:59:17.446487 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e088bef1-d23d-47a1-b266-6493f8fe0507-utilities\") pod \"e088bef1-d23d-47a1-b266-6493f8fe0507\" (UID: \"e088bef1-d23d-47a1-b266-6493f8fe0507\") " Nov 21 15:59:17 crc kubenswrapper[4967]: I1121 15:59:17.447809 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e088bef1-d23d-47a1-b266-6493f8fe0507-utilities" (OuterVolumeSpecName: "utilities") pod "e088bef1-d23d-47a1-b266-6493f8fe0507" (UID: "e088bef1-d23d-47a1-b266-6493f8fe0507"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:59:17 crc kubenswrapper[4967]: I1121 15:59:17.453753 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e088bef1-d23d-47a1-b266-6493f8fe0507-kube-api-access-mqxkk" (OuterVolumeSpecName: "kube-api-access-mqxkk") pod "e088bef1-d23d-47a1-b266-6493f8fe0507" (UID: "e088bef1-d23d-47a1-b266-6493f8fe0507"). InnerVolumeSpecName "kube-api-access-mqxkk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:59:17 crc kubenswrapper[4967]: I1121 15:59:17.455831 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 21 15:59:17 crc kubenswrapper[4967]: I1121 15:59:17.468393 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e088bef1-d23d-47a1-b266-6493f8fe0507-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e088bef1-d23d-47a1-b266-6493f8fe0507" (UID: "e088bef1-d23d-47a1-b266-6493f8fe0507"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:59:17 crc kubenswrapper[4967]: I1121 15:59:17.550792 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqxkk\" (UniqueName: \"kubernetes.io/projected/e088bef1-d23d-47a1-b266-6493f8fe0507-kube-api-access-mqxkk\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:17 crc kubenswrapper[4967]: I1121 15:59:17.551165 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e088bef1-d23d-47a1-b266-6493f8fe0507-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:17 crc kubenswrapper[4967]: I1121 15:59:17.551259 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e088bef1-d23d-47a1-b266-6493f8fe0507-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:18 crc kubenswrapper[4967]: I1121 15:59:18.098065 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2bxgl" Nov 21 15:59:18 crc kubenswrapper[4967]: I1121 15:59:18.098982 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2bxgl" event={"ID":"e088bef1-d23d-47a1-b266-6493f8fe0507","Type":"ContainerDied","Data":"63cfd74a4c7919cb3d47d70ac3038fbb99584401dfb2ed31236a17bdf6556efb"} Nov 21 15:59:18 crc kubenswrapper[4967]: I1121 15:59:18.099066 4967 scope.go:117] "RemoveContainer" containerID="680cad6b36778fb418bf89136ac7828862421c211a05cd5f5f442cea202fb7bb" Nov 21 15:59:18 crc kubenswrapper[4967]: I1121 15:59:18.140011 4967 scope.go:117] "RemoveContainer" containerID="8a626686d0ac820a64f38a9f9f4f4a22baef94c17eb8be10856ac7f85b955ddc" Nov 21 15:59:18 crc kubenswrapper[4967]: I1121 15:59:18.151158 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2bxgl"] Nov 21 15:59:18 crc kubenswrapper[4967]: I1121 15:59:18.162011 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2bxgl"] Nov 21 15:59:18 crc kubenswrapper[4967]: I1121 15:59:18.163345 4967 scope.go:117] "RemoveContainer" containerID="add509dd55319fd519fd276fb0d51a2295e888243aa7ad48f667cf1c8a81b779" Nov 21 15:59:18 crc kubenswrapper[4967]: I1121 15:59:18.552303 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e088bef1-d23d-47a1-b266-6493f8fe0507" path="/var/lib/kubelet/pods/e088bef1-d23d-47a1-b266-6493f8fe0507/volumes" Nov 21 15:59:21 crc kubenswrapper[4967]: I1121 15:59:21.367685 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 15:59:21 crc kubenswrapper[4967]: I1121 15:59:21.368473 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="52658e97-d6ec-4a6d-ac6b-a5168a5ab42f" containerName="kube-state-metrics" containerID="cri-o://f4a06cec5ab2b2de6390165c050102c03d3b18d9446865f54b6d8a53ab506e14" gracePeriod=30 Nov 21 15:59:21 crc kubenswrapper[4967]: I1121 15:59:21.395075 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 21 15:59:21 crc kubenswrapper[4967]: I1121 15:59:21.395245 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 21 15:59:21 crc kubenswrapper[4967]: I1121 15:59:21.440590 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 21 15:59:21 crc kubenswrapper[4967]: 
I1121 15:59:21.441030 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mysqld-exporter-0" podUID="58555aef-0397-4247-be17-7efcbbb36fca" containerName="mysqld-exporter" containerID="cri-o://283b83a10d41bb506e7248dfdaf806cfc0c86d4c413d026a02a55337e15abd16" gracePeriod=30 Nov 21 15:59:21 crc kubenswrapper[4967]: I1121 15:59:21.748178 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/kube-state-metrics-0" podUID="52658e97-d6ec-4a6d-ac6b-a5168a5ab42f" containerName="kube-state-metrics" probeResult="failure" output="Get \"http://10.217.0.135:8081/readyz\": dial tcp 10.217.0.135:8081: connect: connection refused" Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.150013 4967 generic.go:334] "Generic (PLEG): container finished" podID="58555aef-0397-4247-be17-7efcbbb36fca" containerID="283b83a10d41bb506e7248dfdaf806cfc0c86d4c413d026a02a55337e15abd16" exitCode=2 Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.150095 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"58555aef-0397-4247-be17-7efcbbb36fca","Type":"ContainerDied","Data":"283b83a10d41bb506e7248dfdaf806cfc0c86d4c413d026a02a55337e15abd16"} Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.152402 4967 generic.go:334] "Generic (PLEG): container finished" podID="52658e97-d6ec-4a6d-ac6b-a5168a5ab42f" containerID="f4a06cec5ab2b2de6390165c050102c03d3b18d9446865f54b6d8a53ab506e14" exitCode=2 Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.152541 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"52658e97-d6ec-4a6d-ac6b-a5168a5ab42f","Type":"ContainerDied","Data":"f4a06cec5ab2b2de6390165c050102c03d3b18d9446865f54b6d8a53ab506e14"} Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.455608 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.483340 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="57790c9e-cc4b-41f4-aaa5-8c8b0ee09288" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.246:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.483445 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="57790c9e-cc4b-41f4-aaa5-8c8b0ee09288" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.246:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.503892 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.569516 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.673059 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58555aef-0397-4247-be17-7efcbbb36fca-combined-ca-bundle\") pod \"58555aef-0397-4247-be17-7efcbbb36fca\" (UID: \"58555aef-0397-4247-be17-7efcbbb36fca\") " Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.673669 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d9gqt\" (UniqueName: \"kubernetes.io/projected/58555aef-0397-4247-be17-7efcbbb36fca-kube-api-access-d9gqt\") pod \"58555aef-0397-4247-be17-7efcbbb36fca\" (UID: \"58555aef-0397-4247-be17-7efcbbb36fca\") " Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.673756 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58555aef-0397-4247-be17-7efcbbb36fca-config-data\") pod \"58555aef-0397-4247-be17-7efcbbb36fca\" (UID: \"58555aef-0397-4247-be17-7efcbbb36fca\") " Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.679716 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58555aef-0397-4247-be17-7efcbbb36fca-kube-api-access-d9gqt" (OuterVolumeSpecName: "kube-api-access-d9gqt") pod "58555aef-0397-4247-be17-7efcbbb36fca" (UID: "58555aef-0397-4247-be17-7efcbbb36fca"). InnerVolumeSpecName "kube-api-access-d9gqt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.753364 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58555aef-0397-4247-be17-7efcbbb36fca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "58555aef-0397-4247-be17-7efcbbb36fca" (UID: "58555aef-0397-4247-be17-7efcbbb36fca"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.771151 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58555aef-0397-4247-be17-7efcbbb36fca-config-data" (OuterVolumeSpecName: "config-data") pod "58555aef-0397-4247-be17-7efcbbb36fca" (UID: "58555aef-0397-4247-be17-7efcbbb36fca"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.778878 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58555aef-0397-4247-be17-7efcbbb36fca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.778914 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d9gqt\" (UniqueName: \"kubernetes.io/projected/58555aef-0397-4247-be17-7efcbbb36fca-kube-api-access-d9gqt\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.778927 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58555aef-0397-4247-be17-7efcbbb36fca-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.854943 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.880403 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hhgx5\" (UniqueName: \"kubernetes.io/projected/52658e97-d6ec-4a6d-ac6b-a5168a5ab42f-kube-api-access-hhgx5\") pod \"52658e97-d6ec-4a6d-ac6b-a5168a5ab42f\" (UID: \"52658e97-d6ec-4a6d-ac6b-a5168a5ab42f\") " Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.883571 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52658e97-d6ec-4a6d-ac6b-a5168a5ab42f-kube-api-access-hhgx5" (OuterVolumeSpecName: "kube-api-access-hhgx5") pod "52658e97-d6ec-4a6d-ac6b-a5168a5ab42f" (UID: "52658e97-d6ec-4a6d-ac6b-a5168a5ab42f"). InnerVolumeSpecName "kube-api-access-hhgx5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:59:22 crc kubenswrapper[4967]: I1121 15:59:22.983665 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hhgx5\" (UniqueName: \"kubernetes.io/projected/52658e97-d6ec-4a6d-ac6b-a5168a5ab42f-kube-api-access-hhgx5\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.166086 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.166072 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"52658e97-d6ec-4a6d-ac6b-a5168a5ab42f","Type":"ContainerDied","Data":"f29eeda15d8ce06646ef08c9c2f0c59a70413d50ca762dc9e72b3fef36ba234b"} Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.166635 4967 scope.go:117] "RemoveContainer" containerID="f4a06cec5ab2b2de6390165c050102c03d3b18d9446865f54b6d8a53ab506e14" Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.169125 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"58555aef-0397-4247-be17-7efcbbb36fca","Type":"ContainerDied","Data":"ff0b6a272dd7f48b8b450f64f9827e7afab6ff6b2692e94d193e85aca03ca4e5"} Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.169151 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.219811 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.230439 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.264204 4967 scope.go:117] "RemoveContainer" containerID="283b83a10d41bb506e7248dfdaf806cfc0c86d4c413d026a02a55337e15abd16" Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.310178 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-0"] Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.332359 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Nov 21 15:59:23 crc kubenswrapper[4967]: E1121 15:59:23.333633 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e088bef1-d23d-47a1-b266-6493f8fe0507" containerName="extract-content" Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.333666 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="e088bef1-d23d-47a1-b266-6493f8fe0507" containerName="extract-content" Nov 21 15:59:23 crc kubenswrapper[4967]: E1121 15:59:23.333701 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52658e97-d6ec-4a6d-ac6b-a5168a5ab42f" containerName="kube-state-metrics" Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.333708 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="52658e97-d6ec-4a6d-ac6b-a5168a5ab42f" containerName="kube-state-metrics" Nov 21 15:59:23 crc kubenswrapper[4967]: E1121 15:59:23.333741 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e088bef1-d23d-47a1-b266-6493f8fe0507" containerName="extract-utilities" Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.333750 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="e088bef1-d23d-47a1-b266-6493f8fe0507" containerName="extract-utilities" Nov 21 15:59:23 crc kubenswrapper[4967]: E1121 15:59:23.333782 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58555aef-0397-4247-be17-7efcbbb36fca" containerName="mysqld-exporter" Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.333789 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="58555aef-0397-4247-be17-7efcbbb36fca" containerName="mysqld-exporter" Nov 21 15:59:23 crc kubenswrapper[4967]: E1121 15:59:23.333804 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e088bef1-d23d-47a1-b266-6493f8fe0507" containerName="registry-server" Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.333811 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="e088bef1-d23d-47a1-b266-6493f8fe0507" containerName="registry-server" Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.334226 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="e088bef1-d23d-47a1-b266-6493f8fe0507" containerName="registry-server" Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.334284 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="52658e97-d6ec-4a6d-ac6b-a5168a5ab42f" containerName="kube-state-metrics" Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.334303 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="58555aef-0397-4247-be17-7efcbbb36fca" containerName="mysqld-exporter" Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.335523 4967 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.340509 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.340853 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-mysqld-exporter-svc"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.367379 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.378027 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.389551 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"]
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.407698 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.409404 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.413609 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.413806 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.422607 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.442037 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6c61443-128e-48f7-9753-0283a3a7d3ba-config-data\") pod \"mysqld-exporter-0\" (UID: \"b6c61443-128e-48f7-9753-0283a3a7d3ba\") " pod="openstack/mysqld-exporter-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.442176 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6c61443-128e-48f7-9753-0283a3a7d3ba-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"b6c61443-128e-48f7-9753-0283a3a7d3ba\") " pod="openstack/mysqld-exporter-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.442283 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwqnm\" (UniqueName: \"kubernetes.io/projected/b6c61443-128e-48f7-9753-0283a3a7d3ba-kube-api-access-dwqnm\") pod \"mysqld-exporter-0\" (UID: \"b6c61443-128e-48f7-9753-0283a3a7d3ba\") " pod="openstack/mysqld-exporter-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.442356 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6c61443-128e-48f7-9753-0283a3a7d3ba-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"b6c61443-128e-48f7-9753-0283a3a7d3ba\") " pod="openstack/mysqld-exporter-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.492749 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.493021 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerName="ceilometer-central-agent" containerID="cri-o://35f09dc9af827da1bbff47e78b542bedd436ee6b35880884630666597c3eba6a" gracePeriod=30
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.493084 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerName="proxy-httpd" containerID="cri-o://6305fbb4a10b9ac292d63c3e58d662c20257d7853977bdae9b878c4a0fcd297c" gracePeriod=30
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.493138 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerName="sg-core" containerID="cri-o://1b6d94ab75ffdfca29f547cf87959ee9bfb21419e44c982562e3cd1f8fd4fce8" gracePeriod=30
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.493156 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerName="ceilometer-notification-agent" containerID="cri-o://ae40ea6ce7cc5aced97eb25210f3ca82dd26ae6a372a394bac3ced2a89b54a54" gracePeriod=30
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.545617 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6c61443-128e-48f7-9753-0283a3a7d3ba-config-data\") pod \"mysqld-exporter-0\" (UID: \"b6c61443-128e-48f7-9753-0283a3a7d3ba\") " pod="openstack/mysqld-exporter-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.545805 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48d66520-3487-4842-b42f-5db405361e11-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"48d66520-3487-4842-b42f-5db405361e11\") " pod="openstack/kube-state-metrics-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.545842 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hwrx\" (UniqueName: \"kubernetes.io/projected/48d66520-3487-4842-b42f-5db405361e11-kube-api-access-8hwrx\") pod \"kube-state-metrics-0\" (UID: \"48d66520-3487-4842-b42f-5db405361e11\") " pod="openstack/kube-state-metrics-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.546083 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6c61443-128e-48f7-9753-0283a3a7d3ba-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"b6c61443-128e-48f7-9753-0283a3a7d3ba\") " pod="openstack/mysqld-exporter-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.546150 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/48d66520-3487-4842-b42f-5db405361e11-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"48d66520-3487-4842-b42f-5db405361e11\") " pod="openstack/kube-state-metrics-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.546359 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwqnm\" (UniqueName: \"kubernetes.io/projected/b6c61443-128e-48f7-9753-0283a3a7d3ba-kube-api-access-dwqnm\") pod \"mysqld-exporter-0\" (UID: \"b6c61443-128e-48f7-9753-0283a3a7d3ba\") " pod="openstack/mysqld-exporter-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.546401 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/48d66520-3487-4842-b42f-5db405361e11-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"48d66520-3487-4842-b42f-5db405361e11\") " pod="openstack/kube-state-metrics-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.547023 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6c61443-128e-48f7-9753-0283a3a7d3ba-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"b6c61443-128e-48f7-9753-0283a3a7d3ba\") " pod="openstack/mysqld-exporter-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.550413 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6c61443-128e-48f7-9753-0283a3a7d3ba-config-data\") pod \"mysqld-exporter-0\" (UID: \"b6c61443-128e-48f7-9753-0283a3a7d3ba\") " pod="openstack/mysqld-exporter-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.550785 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6c61443-128e-48f7-9753-0283a3a7d3ba-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"b6c61443-128e-48f7-9753-0283a3a7d3ba\") " pod="openstack/mysqld-exporter-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.551775 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/b6c61443-128e-48f7-9753-0283a3a7d3ba-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"b6c61443-128e-48f7-9753-0283a3a7d3ba\") " pod="openstack/mysqld-exporter-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.567470 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwqnm\" (UniqueName: \"kubernetes.io/projected/b6c61443-128e-48f7-9753-0283a3a7d3ba-kube-api-access-dwqnm\") pod \"mysqld-exporter-0\" (UID: \"b6c61443-128e-48f7-9753-0283a3a7d3ba\") " pod="openstack/mysqld-exporter-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.651280 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/48d66520-3487-4842-b42f-5db405361e11-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"48d66520-3487-4842-b42f-5db405361e11\") " pod="openstack/kube-state-metrics-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.653139 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/48d66520-3487-4842-b42f-5db405361e11-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"48d66520-3487-4842-b42f-5db405361e11\") " pod="openstack/kube-state-metrics-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.654181 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48d66520-3487-4842-b42f-5db405361e11-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"48d66520-3487-4842-b42f-5db405361e11\") " pod="openstack/kube-state-metrics-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.654251 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hwrx\" (UniqueName: \"kubernetes.io/projected/48d66520-3487-4842-b42f-5db405361e11-kube-api-access-8hwrx\") pod \"kube-state-metrics-0\" (UID: \"48d66520-3487-4842-b42f-5db405361e11\") " pod="openstack/kube-state-metrics-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.657304 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/48d66520-3487-4842-b42f-5db405361e11-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"48d66520-3487-4842-b42f-5db405361e11\") " pod="openstack/kube-state-metrics-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.659286 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48d66520-3487-4842-b42f-5db405361e11-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"48d66520-3487-4842-b42f-5db405361e11\") " pod="openstack/kube-state-metrics-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.661069 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/48d66520-3487-4842-b42f-5db405361e11-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"48d66520-3487-4842-b42f-5db405361e11\") " pod="openstack/kube-state-metrics-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.663694 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.671863 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hwrx\" (UniqueName: \"kubernetes.io/projected/48d66520-3487-4842-b42f-5db405361e11-kube-api-access-8hwrx\") pod \"kube-state-metrics-0\" (UID: \"48d66520-3487-4842-b42f-5db405361e11\") " pod="openstack/kube-state-metrics-0"
Nov 21 15:59:23 crc kubenswrapper[4967]: I1121 15:59:23.734777 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 21 15:59:24 crc kubenswrapper[4967]: I1121 15:59:24.193256 4967 generic.go:334] "Generic (PLEG): container finished" podID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerID="6305fbb4a10b9ac292d63c3e58d662c20257d7853977bdae9b878c4a0fcd297c" exitCode=0
Nov 21 15:59:24 crc kubenswrapper[4967]: I1121 15:59:24.193593 4967 generic.go:334] "Generic (PLEG): container finished" podID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerID="1b6d94ab75ffdfca29f547cf87959ee9bfb21419e44c982562e3cd1f8fd4fce8" exitCode=2
Nov 21 15:59:24 crc kubenswrapper[4967]: I1121 15:59:24.193338 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"268acb69-9196-47c9-8f92-b8b16d63a4a9","Type":"ContainerDied","Data":"6305fbb4a10b9ac292d63c3e58d662c20257d7853977bdae9b878c4a0fcd297c"}
Nov 21 15:59:24 crc kubenswrapper[4967]: I1121 15:59:24.193634 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"268acb69-9196-47c9-8f92-b8b16d63a4a9","Type":"ContainerDied","Data":"1b6d94ab75ffdfca29f547cf87959ee9bfb21419e44c982562e3cd1f8fd4fce8"}
Nov 21 15:59:24 crc kubenswrapper[4967]: I1121 15:59:24.220100 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"]
Nov 21 15:59:24 crc kubenswrapper[4967]: W1121 15:59:24.276690 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6c61443_128e_48f7_9753_0283a3a7d3ba.slice/crio-0849a8aadc5c4dfec6feb22ccaac32b8d77a8b0a8653e9374093671756be32e7 WatchSource:0}: Error finding container 0849a8aadc5c4dfec6feb22ccaac32b8d77a8b0a8653e9374093671756be32e7: Status 404 returned error can't find the container with id 0849a8aadc5c4dfec6feb22ccaac32b8d77a8b0a8653e9374093671756be32e7
Nov 21 15:59:24 crc kubenswrapper[4967]: W1121 15:59:24.327367 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod48d66520_3487_4842_b42f_5db405361e11.slice/crio-44f9405392f4a0d82f50ae83380d64fdd5888543d13285dec9d068386e64d7d4 WatchSource:0}: Error finding container 44f9405392f4a0d82f50ae83380d64fdd5888543d13285dec9d068386e64d7d4: Status 404 returned error can't find the container with id 44f9405392f4a0d82f50ae83380d64fdd5888543d13285dec9d068386e64d7d4
Nov 21 15:59:24 crc kubenswrapper[4967]: I1121 15:59:24.329243 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 21 15:59:24 crc kubenswrapper[4967]: I1121 15:59:24.550597 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52658e97-d6ec-4a6d-ac6b-a5168a5ab42f" path="/var/lib/kubelet/pods/52658e97-d6ec-4a6d-ac6b-a5168a5ab42f/volumes"
Nov 21 15:59:24 crc kubenswrapper[4967]: I1121 15:59:24.561497 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58555aef-0397-4247-be17-7efcbbb36fca" path="/var/lib/kubelet/pods/58555aef-0397-4247-be17-7efcbbb36fca/volumes"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.188425 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.207931 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"48d66520-3487-4842-b42f-5db405361e11","Type":"ContainerStarted","Data":"44f9405392f4a0d82f50ae83380d64fdd5888543d13285dec9d068386e64d7d4"}
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.209413 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"b6c61443-128e-48f7-9753-0283a3a7d3ba","Type":"ContainerStarted","Data":"0849a8aadc5c4dfec6feb22ccaac32b8d77a8b0a8653e9374093671756be32e7"}
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.218536 4967 generic.go:334] "Generic (PLEG): container finished" podID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerID="ae40ea6ce7cc5aced97eb25210f3ca82dd26ae6a372a394bac3ced2a89b54a54" exitCode=0
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.218601 4967 generic.go:334] "Generic (PLEG): container finished" podID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerID="35f09dc9af827da1bbff47e78b542bedd436ee6b35880884630666597c3eba6a" exitCode=0
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.218644 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"268acb69-9196-47c9-8f92-b8b16d63a4a9","Type":"ContainerDied","Data":"ae40ea6ce7cc5aced97eb25210f3ca82dd26ae6a372a394bac3ced2a89b54a54"}
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.218705 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"268acb69-9196-47c9-8f92-b8b16d63a4a9","Type":"ContainerDied","Data":"35f09dc9af827da1bbff47e78b542bedd436ee6b35880884630666597c3eba6a"}
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.218726 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"268acb69-9196-47c9-8f92-b8b16d63a4a9","Type":"ContainerDied","Data":"1cbab925f72ff4b4f3400186b6573feea63645f3fd919099c82f1228fd1783b9"}
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.218757 4967 scope.go:117] "RemoveContainer" containerID="6305fbb4a10b9ac292d63c3e58d662c20257d7853977bdae9b878c4a0fcd297c"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.218948 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.293840 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-combined-ca-bundle\") pod \"268acb69-9196-47c9-8f92-b8b16d63a4a9\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") "
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.293929 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-scripts\") pod \"268acb69-9196-47c9-8f92-b8b16d63a4a9\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") "
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.294084 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-sg-core-conf-yaml\") pod \"268acb69-9196-47c9-8f92-b8b16d63a4a9\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") "
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.294173 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-config-data\") pod \"268acb69-9196-47c9-8f92-b8b16d63a4a9\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") "
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.294199 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vpn4z\" (UniqueName: \"kubernetes.io/projected/268acb69-9196-47c9-8f92-b8b16d63a4a9-kube-api-access-vpn4z\") pod \"268acb69-9196-47c9-8f92-b8b16d63a4a9\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") "
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.294350 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/268acb69-9196-47c9-8f92-b8b16d63a4a9-log-httpd\") pod \"268acb69-9196-47c9-8f92-b8b16d63a4a9\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") "
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.294457 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/268acb69-9196-47c9-8f92-b8b16d63a4a9-run-httpd\") pod \"268acb69-9196-47c9-8f92-b8b16d63a4a9\" (UID: \"268acb69-9196-47c9-8f92-b8b16d63a4a9\") "
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.295203 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/268acb69-9196-47c9-8f92-b8b16d63a4a9-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "268acb69-9196-47c9-8f92-b8b16d63a4a9" (UID: "268acb69-9196-47c9-8f92-b8b16d63a4a9"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.295486 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/268acb69-9196-47c9-8f92-b8b16d63a4a9-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "268acb69-9196-47c9-8f92-b8b16d63a4a9" (UID: "268acb69-9196-47c9-8f92-b8b16d63a4a9"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.299970 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/268acb69-9196-47c9-8f92-b8b16d63a4a9-kube-api-access-vpn4z" (OuterVolumeSpecName: "kube-api-access-vpn4z") pod "268acb69-9196-47c9-8f92-b8b16d63a4a9" (UID: "268acb69-9196-47c9-8f92-b8b16d63a4a9"). InnerVolumeSpecName "kube-api-access-vpn4z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.302820 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-scripts" (OuterVolumeSpecName: "scripts") pod "268acb69-9196-47c9-8f92-b8b16d63a4a9" (UID: "268acb69-9196-47c9-8f92-b8b16d63a4a9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.332794 4967 scope.go:117] "RemoveContainer" containerID="1b6d94ab75ffdfca29f547cf87959ee9bfb21419e44c982562e3cd1f8fd4fce8"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.339148 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "268acb69-9196-47c9-8f92-b8b16d63a4a9" (UID: "268acb69-9196-47c9-8f92-b8b16d63a4a9"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.397433 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vpn4z\" (UniqueName: \"kubernetes.io/projected/268acb69-9196-47c9-8f92-b8b16d63a4a9-kube-api-access-vpn4z\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.397462 4967 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/268acb69-9196-47c9-8f92-b8b16d63a4a9-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.397472 4967 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/268acb69-9196-47c9-8f92-b8b16d63a4a9-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.397479 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-scripts\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.397489 4967 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.402634 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "268acb69-9196-47c9-8f92-b8b16d63a4a9" (UID: "268acb69-9196-47c9-8f92-b8b16d63a4a9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.447259 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-config-data" (OuterVolumeSpecName: "config-data") pod "268acb69-9196-47c9-8f92-b8b16d63a4a9" (UID: "268acb69-9196-47c9-8f92-b8b16d63a4a9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.484425 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.499838 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.499880 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/268acb69-9196-47c9-8f92-b8b16d63a4a9-config-data\") on node \"crc\" DevicePath \"\""
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.556614 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.578180 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.591898 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 21 15:59:25 crc kubenswrapper[4967]: E1121 15:59:25.592539 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerName="ceilometer-notification-agent"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.592564 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerName="ceilometer-notification-agent"
Nov 21 15:59:25 crc kubenswrapper[4967]: E1121 15:59:25.592594 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerName="proxy-httpd"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.592600 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerName="proxy-httpd"
Nov 21 15:59:25 crc kubenswrapper[4967]: E1121 15:59:25.592616 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerName="ceilometer-central-agent"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.592622 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerName="ceilometer-central-agent"
Nov 21 15:59:25 crc kubenswrapper[4967]: E1121 15:59:25.592656 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerName="sg-core"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.592664 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerName="sg-core"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.592869 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerName="sg-core"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.592896 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerName="proxy-httpd"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.592908 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerName="ceilometer-notification-agent"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.592923 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="268acb69-9196-47c9-8f92-b8b16d63a4a9" containerName="ceilometer-central-agent"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.595051 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.596885 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.597979 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.598900 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.599600 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.705958 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/df96ef36-2101-4339-b124-d6df69373e2b-run-httpd\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.706011 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.706133 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvv9z\" (UniqueName: \"kubernetes.io/projected/df96ef36-2101-4339-b124-d6df69373e2b-kube-api-access-zvv9z\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.706156 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-scripts\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.706197 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.706276 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/df96ef36-2101-4339-b124-d6df69373e2b-log-httpd\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.706374 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-config-data\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.706476 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.800512 4967 scope.go:117] "RemoveContainer" containerID="ae40ea6ce7cc5aced97eb25210f3ca82dd26ae6a372a394bac3ced2a89b54a54"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.810247 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvv9z\" (UniqueName: \"kubernetes.io/projected/df96ef36-2101-4339-b124-d6df69373e2b-kube-api-access-zvv9z\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.810296 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-scripts\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.810354 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.810461 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/df96ef36-2101-4339-b124-d6df69373e2b-log-httpd\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.810534 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-config-data\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.810592 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.810980 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/df96ef36-2101-4339-b124-d6df69373e2b-log-httpd\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.811154 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/df96ef36-2101-4339-b124-d6df69373e2b-run-httpd\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.811188 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.813028 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/df96ef36-2101-4339-b124-d6df69373e2b-run-httpd\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.814790 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.815666 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.819209 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.819617 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-config-data\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.821815 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-scripts\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.831108 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvv9z\" (UniqueName: \"kubernetes.io/projected/df96ef36-2101-4339-b124-d6df69373e2b-kube-api-access-zvv9z\") pod \"ceilometer-0\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " pod="openstack/ceilometer-0"
Nov 21 15:59:25 crc kubenswrapper[4967]: I1121 15:59:25.975605 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.077271 4967 scope.go:117] "RemoveContainer" containerID="35f09dc9af827da1bbff47e78b542bedd436ee6b35880884630666597c3eba6a"
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.243872 4967 scope.go:117] "RemoveContainer" containerID="6305fbb4a10b9ac292d63c3e58d662c20257d7853977bdae9b878c4a0fcd297c"
Nov 21 15:59:26 crc kubenswrapper[4967]: E1121 15:59:26.244553 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6305fbb4a10b9ac292d63c3e58d662c20257d7853977bdae9b878c4a0fcd297c\": container with ID starting with 6305fbb4a10b9ac292d63c3e58d662c20257d7853977bdae9b878c4a0fcd297c not found: ID does not exist" containerID="6305fbb4a10b9ac292d63c3e58d662c20257d7853977bdae9b878c4a0fcd297c"
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.244596 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6305fbb4a10b9ac292d63c3e58d662c20257d7853977bdae9b878c4a0fcd297c"} err="failed to get container status \"6305fbb4a10b9ac292d63c3e58d662c20257d7853977bdae9b878c4a0fcd297c\": rpc error: code = NotFound desc = could not find container \"6305fbb4a10b9ac292d63c3e58d662c20257d7853977bdae9b878c4a0fcd297c\": container with ID starting with 6305fbb4a10b9ac292d63c3e58d662c20257d7853977bdae9b878c4a0fcd297c not found: ID does not exist"
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.244616 4967 scope.go:117] "RemoveContainer" containerID="1b6d94ab75ffdfca29f547cf87959ee9bfb21419e44c982562e3cd1f8fd4fce8"
Nov 21 15:59:26 crc kubenswrapper[4967]: E1121 15:59:26.244834 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b6d94ab75ffdfca29f547cf87959ee9bfb21419e44c982562e3cd1f8fd4fce8\": container with ID starting with 1b6d94ab75ffdfca29f547cf87959ee9bfb21419e44c982562e3cd1f8fd4fce8 not found: ID does not exist" containerID="1b6d94ab75ffdfca29f547cf87959ee9bfb21419e44c982562e3cd1f8fd4fce8"
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.244858 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b6d94ab75ffdfca29f547cf87959ee9bfb21419e44c982562e3cd1f8fd4fce8"} err="failed to get container status \"1b6d94ab75ffdfca29f547cf87959ee9bfb21419e44c982562e3cd1f8fd4fce8\": rpc error: code = NotFound desc = could not find container \"1b6d94ab75ffdfca29f547cf87959ee9bfb21419e44c982562e3cd1f8fd4fce8\": container with ID starting with 1b6d94ab75ffdfca29f547cf87959ee9bfb21419e44c982562e3cd1f8fd4fce8 not found: ID does not exist"
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.244872 4967 scope.go:117] "RemoveContainer" containerID="ae40ea6ce7cc5aced97eb25210f3ca82dd26ae6a372a394bac3ced2a89b54a54"
Nov 21 15:59:26 crc kubenswrapper[4967]: E1121 15:59:26.245076 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae40ea6ce7cc5aced97eb25210f3ca82dd26ae6a372a394bac3ced2a89b54a54\": container with ID starting with ae40ea6ce7cc5aced97eb25210f3ca82dd26ae6a372a394bac3ced2a89b54a54 not found: ID does not exist" containerID="ae40ea6ce7cc5aced97eb25210f3ca82dd26ae6a372a394bac3ced2a89b54a54"
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.245096 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae40ea6ce7cc5aced97eb25210f3ca82dd26ae6a372a394bac3ced2a89b54a54"} err="failed to get container status \"ae40ea6ce7cc5aced97eb25210f3ca82dd26ae6a372a394bac3ced2a89b54a54\": rpc error: code = NotFound desc = could not find container \"ae40ea6ce7cc5aced97eb25210f3ca82dd26ae6a372a394bac3ced2a89b54a54\": container with ID starting with ae40ea6ce7cc5aced97eb25210f3ca82dd26ae6a372a394bac3ced2a89b54a54 not found: ID does not exist"
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.245110 4967 scope.go:117] "RemoveContainer" containerID="35f09dc9af827da1bbff47e78b542bedd436ee6b35880884630666597c3eba6a"
Nov 21 15:59:26 crc kubenswrapper[4967]: E1121 15:59:26.245429 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35f09dc9af827da1bbff47e78b542bedd436ee6b35880884630666597c3eba6a\": container with ID starting with 35f09dc9af827da1bbff47e78b542bedd436ee6b35880884630666597c3eba6a not found: ID does not exist" containerID="35f09dc9af827da1bbff47e78b542bedd436ee6b35880884630666597c3eba6a"
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.245453 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35f09dc9af827da1bbff47e78b542bedd436ee6b35880884630666597c3eba6a"} err="failed to get container status \"35f09dc9af827da1bbff47e78b542bedd436ee6b35880884630666597c3eba6a\": rpc error: code = NotFound desc = could not find container \"35f09dc9af827da1bbff47e78b542bedd436ee6b35880884630666597c3eba6a\": container with ID starting with 35f09dc9af827da1bbff47e78b542bedd436ee6b35880884630666597c3eba6a not found: ID does not exist"
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.245470 4967 scope.go:117] "RemoveContainer" containerID="6305fbb4a10b9ac292d63c3e58d662c20257d7853977bdae9b878c4a0fcd297c"
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.247196 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6305fbb4a10b9ac292d63c3e58d662c20257d7853977bdae9b878c4a0fcd297c"} err="failed to get container status \"6305fbb4a10b9ac292d63c3e58d662c20257d7853977bdae9b878c4a0fcd297c\": rpc error: code = NotFound desc = could not find container \"6305fbb4a10b9ac292d63c3e58d662c20257d7853977bdae9b878c4a0fcd297c\": container with ID starting with 6305fbb4a10b9ac292d63c3e58d662c20257d7853977bdae9b878c4a0fcd297c not found: ID does not exist"
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.247225 4967 scope.go:117] "RemoveContainer" containerID="1b6d94ab75ffdfca29f547cf87959ee9bfb21419e44c982562e3cd1f8fd4fce8"
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.247429 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b6d94ab75ffdfca29f547cf87959ee9bfb21419e44c982562e3cd1f8fd4fce8"} err="failed to get container status \"1b6d94ab75ffdfca29f547cf87959ee9bfb21419e44c982562e3cd1f8fd4fce8\": rpc error: code = NotFound desc = could not find container \"1b6d94ab75ffdfca29f547cf87959ee9bfb21419e44c982562e3cd1f8fd4fce8\": container with ID starting with 1b6d94ab75ffdfca29f547cf87959ee9bfb21419e44c982562e3cd1f8fd4fce8 not found: ID does not exist"
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.247443 4967 scope.go:117] "RemoveContainer" containerID="ae40ea6ce7cc5aced97eb25210f3ca82dd26ae6a372a394bac3ced2a89b54a54"
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.247613 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae40ea6ce7cc5aced97eb25210f3ca82dd26ae6a372a394bac3ced2a89b54a54"} err="failed to get container status \"ae40ea6ce7cc5aced97eb25210f3ca82dd26ae6a372a394bac3ced2a89b54a54\": rpc error: code = NotFound desc = could not find container \"ae40ea6ce7cc5aced97eb25210f3ca82dd26ae6a372a394bac3ced2a89b54a54\": container with ID starting with ae40ea6ce7cc5aced97eb25210f3ca82dd26ae6a372a394bac3ced2a89b54a54 not found: ID does not exist"
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.247627 4967 scope.go:117] "RemoveContainer" containerID="35f09dc9af827da1bbff47e78b542bedd436ee6b35880884630666597c3eba6a"
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.247931 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35f09dc9af827da1bbff47e78b542bedd436ee6b35880884630666597c3eba6a"} err="failed to get container status \"35f09dc9af827da1bbff47e78b542bedd436ee6b35880884630666597c3eba6a\": rpc error: code = NotFound desc = could not find container \"35f09dc9af827da1bbff47e78b542bedd436ee6b35880884630666597c3eba6a\": container with ID starting with 35f09dc9af827da1bbff47e78b542bedd436ee6b35880884630666597c3eba6a not found: ID does not exist"
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.416036 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fzqdr" podUID="d18f04e1-396e-462a-aa4c-c9caeb4523ed" containerName="registry-server" probeResult="failure" output=<
Nov 21 15:59:26 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s
Nov 21 15:59:26 crc kubenswrapper[4967]: >
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.549876 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="268acb69-9196-47c9-8f92-b8b16d63a4a9" path="/var/lib/kubelet/pods/268acb69-9196-47c9-8f92-b8b16d63a4a9/volumes"
Nov 21 15:59:26 crc kubenswrapper[4967]: I1121 15:59:26.584018 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 21 15:59:26 crc kubenswrapper[4967]: W1121 15:59:26.585268 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf96ef36_2101_4339_b124_d6df69373e2b.slice/crio-9f1810f1a9fab4ffcfaaf610c06cf1e1923ab3196d7c68313a6146e7db0b7aa1 WatchSource:0}: Error finding container 9f1810f1a9fab4ffcfaaf610c06cf1e1923ab3196d7c68313a6146e7db0b7aa1: Status 404 returned error can't find the container with id 9f1810f1a9fab4ffcfaaf610c06cf1e1923ab3196d7c68313a6146e7db0b7aa1
Nov 21 15:59:27 crc kubenswrapper[4967]: I1121 15:59:27.248502 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"df96ef36-2101-4339-b124-d6df69373e2b","Type":"ContainerStarted","Data":"9f1810f1a9fab4ffcfaaf610c06cf1e1923ab3196d7c68313a6146e7db0b7aa1"}
Nov 21 15:59:27 crc kubenswrapper[4967]: I1121 15:59:27.250333 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"48d66520-3487-4842-b42f-5db405361e11","Type":"ContainerStarted","Data":"ecd44c854ef72085c4981dd9f8260ae9992c5727de1a602e8e07669c320c00b5"}
Nov 21 15:59:27 crc kubenswrapper[4967]: I1121 15:59:27.250500 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Nov 21 15:59:27 crc kubenswrapper[4967]: I1121 15:59:27.251950 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"b6c61443-128e-48f7-9753-0283a3a7d3ba","Type":"ContainerStarted","Data":"63a4529f676bb68ad987c98f80934dc975ac8654f124d86819a7ba234a446947"}
Nov 21 15:59:27 crc kubenswrapper[4967]: I1121 15:59:27.272460 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.801807404 podStartE2EDuration="4.272443895s" podCreationTimestamp="2025-11-21 15:59:23 +0000 UTC" firstStartedPulling="2025-11-21 15:59:24.330239207 +0000 UTC m=+1452.588760215" lastFinishedPulling="2025-11-21 15:59:25.800875698 +0000 UTC m=+1454.059396706" observedRunningTime="2025-11-21 15:59:27.266619304 +0000 UTC m=+1455.525140322" watchObservedRunningTime="2025-11-21 15:59:27.272443895 +0000 UTC m=+1455.530964913"
Nov 21 15:59:27 crc kubenswrapper[4967]: I1121 15:59:27.290255 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=2.492225704 podStartE2EDuration="4.290230598s" podCreationTimestamp="2025-11-21 15:59:23 +0000 UTC" firstStartedPulling="2025-11-21 15:59:24.279415033 +0000 UTC m=+1452.537936041" lastFinishedPulling="2025-11-21 15:59:26.077419927 +0000 UTC m=+1454.335940935" observedRunningTime="2025-11-21 15:59:27.287605211 +0000 UTC m=+1455.546126219" watchObservedRunningTime="2025-11-21 15:59:27.290230598 +0000 UTC m=+1455.548751596"
Nov 21 15:59:29 crc kubenswrapper[4967]: I1121 15:59:29.274416 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"df96ef36-2101-4339-b124-d6df69373e2b","Type":"ContainerStarted","Data":"65d1bf90843c75f050fc3d820c7983839e42e9988e857950c09c891f2d73a840"}
Nov 21 15:59:31 crc kubenswrapper[4967]: I1121 15:59:31.398110 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 21 15:59:31 crc kubenswrapper[4967]: I1121 15:59:31.398798 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 21 15:59:31 crc kubenswrapper[4967]: I1121 15:59:31.400660 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 21 15:59:31 crc kubenswrapper[4967]: I1121 15:59:31.400752 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.312160 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"df96ef36-2101-4339-b124-d6df69373e2b","Type":"ContainerStarted","Data":"502cc693daa70b71e2f428a31d8e522c294126ec19244e5be6e44bf6a1ff14c1"}
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.312751 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.323581 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.511026 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-kfxx4"]
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.514621 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.526724 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-kfxx4"]
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.568016 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-dns-swift-storage-0\") pod \"dnsmasq-dns-f84f9ccf-kfxx4\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.568080 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwxx6\" (UniqueName: \"kubernetes.io/projected/654b54a8-f5b2-480d-806c-b1c9a8b51e21-kube-api-access-kwxx6\") pod \"dnsmasq-dns-f84f9ccf-kfxx4\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.568155 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-ovsdbserver-nb\") pod \"dnsmasq-dns-f84f9ccf-kfxx4\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.568385 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-dns-svc\") pod \"dnsmasq-dns-f84f9ccf-kfxx4\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.568535 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-config\") pod \"dnsmasq-dns-f84f9ccf-kfxx4\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.568680 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-ovsdbserver-sb\") pod \"dnsmasq-dns-f84f9ccf-kfxx4\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.677712 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-dns-swift-storage-0\") pod \"dnsmasq-dns-f84f9ccf-kfxx4\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.677758 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwxx6\" (UniqueName: \"kubernetes.io/projected/654b54a8-f5b2-480d-806c-b1c9a8b51e21-kube-api-access-kwxx6\") pod \"dnsmasq-dns-f84f9ccf-kfxx4\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.677806 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-ovsdbserver-nb\") pod \"dnsmasq-dns-f84f9ccf-kfxx4\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.677941 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-dns-svc\") pod \"dnsmasq-dns-f84f9ccf-kfxx4\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.677983 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-config\") pod \"dnsmasq-dns-f84f9ccf-kfxx4\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.678076 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-ovsdbserver-sb\") pod \"dnsmasq-dns-f84f9ccf-kfxx4\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.679000 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-ovsdbserver-sb\") pod \"dnsmasq-dns-f84f9ccf-kfxx4\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.679546 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-dns-swift-storage-0\") pod \"dnsmasq-dns-f84f9ccf-kfxx4\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.683708 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-config\") pod \"dnsmasq-dns-f84f9ccf-kfxx4\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.683957 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-ovsdbserver-nb\") pod \"dnsmasq-dns-f84f9ccf-kfxx4\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.686579 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-dns-svc\") pod \"dnsmasq-dns-f84f9ccf-kfxx4\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.698074 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwxx6\" (UniqueName: \"kubernetes.io/projected/654b54a8-f5b2-480d-806c-b1c9a8b51e21-kube-api-access-kwxx6\") pod \"dnsmasq-dns-f84f9ccf-kfxx4\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:32 crc kubenswrapper[4967]: I1121 15:59:32.863125 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:33 crc kubenswrapper[4967]: W1121 15:59:33.427115 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod654b54a8_f5b2_480d_806c_b1c9a8b51e21.slice/crio-3ca001a5094d6192d331ef1e12cd1cf06bdab5ebdbafb40fbae9403881d28a82 WatchSource:0}: Error finding container 3ca001a5094d6192d331ef1e12cd1cf06bdab5ebdbafb40fbae9403881d28a82: Status 404 returned error can't find the container with id 3ca001a5094d6192d331ef1e12cd1cf06bdab5ebdbafb40fbae9403881d28a82
Nov 21 15:59:33 crc kubenswrapper[4967]: I1121 15:59:33.431202 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-kfxx4"]
Nov 21 15:59:33 crc kubenswrapper[4967]: I1121 15:59:33.877974 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Nov 21 15:59:34 crc kubenswrapper[4967]: I1121 15:59:34.344640 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"df96ef36-2101-4339-b124-d6df69373e2b","Type":"ContainerStarted","Data":"fd0c102f8ffdc243ec5d9f3fb13eabd4cf5e9b9999173025251e1bcd052418de"}
Nov 21 15:59:34 crc kubenswrapper[4967]: I1121 15:59:34.347250 4967 generic.go:334] "Generic (PLEG): container finished" podID="654b54a8-f5b2-480d-806c-b1c9a8b51e21" containerID="6f2a125e58fce7aa897e8fbb6ab534809b70f2dcc81e2ca34b405725c044324e" exitCode=0
Nov 21 15:59:34 crc kubenswrapper[4967]: I1121 15:59:34.347519 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4" event={"ID":"654b54a8-f5b2-480d-806c-b1c9a8b51e21","Type":"ContainerDied","Data":"6f2a125e58fce7aa897e8fbb6ab534809b70f2dcc81e2ca34b405725c044324e"}
Nov 21 15:59:34 crc kubenswrapper[4967]: I1121 15:59:34.347588 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4" event={"ID":"654b54a8-f5b2-480d-806c-b1c9a8b51e21","Type":"ContainerStarted","Data":"3ca001a5094d6192d331ef1e12cd1cf06bdab5ebdbafb40fbae9403881d28a82"}
Nov 21 15:59:34 crc kubenswrapper[4967]: I1121 15:59:34.988330 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 21 15:59:35 crc kubenswrapper[4967]: I1121 15:59:35.231712 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 21 15:59:35 crc kubenswrapper[4967]: I1121 15:59:35.360851 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4" event={"ID":"654b54a8-f5b2-480d-806c-b1c9a8b51e21","Type":"ContainerStarted","Data":"63efc3464f37c63a3f2107684800694adc4f776ce71758995f8382d7e457df3e"}
Nov 21 15:59:35 crc kubenswrapper[4967]: I1121 15:59:35.360926 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4"
Nov 21 15:59:35 crc kubenswrapper[4967]: I1121 15:59:35.361066 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="57790c9e-cc4b-41f4-aaa5-8c8b0ee09288" containerName="nova-api-log" containerID="cri-o://e5c6f6432b4d0de7a38e811ceea976a8a6b1812032e792816b4364a82331569e" gracePeriod=30
Nov 21 15:59:35 crc kubenswrapper[4967]: I1121 15:59:35.361100 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="57790c9e-cc4b-41f4-aaa5-8c8b0ee09288" containerName="nova-api-api" containerID="cri-o://8457305cb1421c4fd494565ba87a1cc04a023299873fb13766968f4253d44fc6" gracePeriod=30
Nov 21 15:59:35 crc kubenswrapper[4967]: I1121 15:59:35.402658 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4" podStartSLOduration=3.402643198 podStartE2EDuration="3.402643198s" podCreationTimestamp="2025-11-21 15:59:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:59:35.399175896 +0000 UTC m=+1463.657696934" watchObservedRunningTime="2025-11-21 15:59:35.402643198 +0000 UTC m=+1463.661164206"
Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.374549 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.415018 4967 generic.go:334] "Generic (PLEG): container finished" podID="0c85c174-d9ef-4c9a-8afb-7071c90b5578" containerID="1e5741d399e6cdd6f295470a873c05f0c3922675e1e9bc8ab0babede1af7d6e4" exitCode=137
Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.415169 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0c85c174-d9ef-4c9a-8afb-7071c90b5578","Type":"ContainerDied","Data":"1e5741d399e6cdd6f295470a873c05f0c3922675e1e9bc8ab0babede1af7d6e4"}
Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.434188 4967 generic.go:334] "Generic (PLEG): container finished" podID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerID="1a961c45eaba440f4856c5acc727fc7f17824dd8a7966789ff4dae85765040a6" exitCode=137
Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.434287 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"4f74f871-6a82-49a3-a9e7-a991a513027b","Type":"ContainerDied","Data":"1a961c45eaba440f4856c5acc727fc7f17824dd8a7966789ff4dae85765040a6"}
Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.437177 4967 generic.go:334] "Generic (PLEG): container finished" podID="ffce645f-18bf-4182-8524-38af2bc17063" containerID="561c12afe2562b481f1e8e5e0d200eed29d0232d3993bb2fab59f173a02a32f3" exitCode=137
Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.437286 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ffce645f-18bf-4182-8524-38af2bc17063","Type":"ContainerDied","Data":"561c12afe2562b481f1e8e5e0d200eed29d0232d3993bb2fab59f173a02a32f3"}
Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.437330 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ffce645f-18bf-4182-8524-38af2bc17063","Type":"ContainerDied","Data":"ed7c10562e5d24e1f2cf2000a72f3fcf2fa824b54a0ae1d1424e977c3223c8f5"}
Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.437357 4967 scope.go:117] "RemoveContainer" containerID="561c12afe2562b481f1e8e5e0d200eed29d0232d3993bb2fab59f173a02a32f3"
Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.437743 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fzqdr" podUID="d18f04e1-396e-462a-aa4c-c9caeb4523ed" containerName="registry-server" probeResult="failure" output=<
Nov 21 15:59:36 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s
Nov 21 15:59:36 crc kubenswrapper[4967]: >
Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.441612 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.446850 4967 generic.go:334] "Generic (PLEG): container finished" podID="57790c9e-cc4b-41f4-aaa5-8c8b0ee09288" containerID="e5c6f6432b4d0de7a38e811ceea976a8a6b1812032e792816b4364a82331569e" exitCode=143
Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.446936 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288","Type":"ContainerDied","Data":"e5c6f6432b4d0de7a38e811ceea976a8a6b1812032e792816b4364a82331569e"}
Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.487054 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffce645f-18bf-4182-8524-38af2bc17063-config-data\") pod \"ffce645f-18bf-4182-8524-38af2bc17063\" (UID: \"ffce645f-18bf-4182-8524-38af2bc17063\") "
Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.487162 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gj9k9\" (UniqueName: \"kubernetes.io/projected/ffce645f-18bf-4182-8524-38af2bc17063-kube-api-access-gj9k9\") pod \"ffce645f-18bf-4182-8524-38af2bc17063\" (UID: \"ffce645f-18bf-4182-8524-38af2bc17063\") "
Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.487269 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffce645f-18bf-4182-8524-38af2bc17063-combined-ca-bundle\") pod \"ffce645f-18bf-4182-8524-38af2bc17063\" (UID: \"ffce645f-18bf-4182-8524-38af2bc17063\") "
Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.502600 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffce645f-18bf-4182-8524-38af2bc17063-kube-api-access-gj9k9" (OuterVolumeSpecName: "kube-api-access-gj9k9") pod "ffce645f-18bf-4182-8524-38af2bc17063" (UID: "ffce645f-18bf-4182-8524-38af2bc17063"). InnerVolumeSpecName "kube-api-access-gj9k9".
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.521672 4967 scope.go:117] "RemoveContainer" containerID="561c12afe2562b481f1e8e5e0d200eed29d0232d3993bb2fab59f173a02a32f3" Nov 21 15:59:36 crc kubenswrapper[4967]: E1121 15:59:36.522489 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"561c12afe2562b481f1e8e5e0d200eed29d0232d3993bb2fab59f173a02a32f3\": container with ID starting with 561c12afe2562b481f1e8e5e0d200eed29d0232d3993bb2fab59f173a02a32f3 not found: ID does not exist" containerID="561c12afe2562b481f1e8e5e0d200eed29d0232d3993bb2fab59f173a02a32f3" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.522533 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"561c12afe2562b481f1e8e5e0d200eed29d0232d3993bb2fab59f173a02a32f3"} err="failed to get container status \"561c12afe2562b481f1e8e5e0d200eed29d0232d3993bb2fab59f173a02a32f3\": rpc error: code = NotFound desc = could not find container \"561c12afe2562b481f1e8e5e0d200eed29d0232d3993bb2fab59f173a02a32f3\": container with ID starting with 561c12afe2562b481f1e8e5e0d200eed29d0232d3993bb2fab59f173a02a32f3 not found: ID does not exist" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.550741 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffce645f-18bf-4182-8524-38af2bc17063-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ffce645f-18bf-4182-8524-38af2bc17063" (UID: "ffce645f-18bf-4182-8524-38af2bc17063"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.570417 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.583244 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffce645f-18bf-4182-8524-38af2bc17063-config-data" (OuterVolumeSpecName: "config-data") pod "ffce645f-18bf-4182-8524-38af2bc17063" (UID: "ffce645f-18bf-4182-8524-38af2bc17063"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.595228 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c85c174-d9ef-4c9a-8afb-7071c90b5578-logs\") pod \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\" (UID: \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\") " Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.595346 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c85c174-d9ef-4c9a-8afb-7071c90b5578-combined-ca-bundle\") pod \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\" (UID: \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\") " Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.595644 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c85c174-d9ef-4c9a-8afb-7071c90b5578-config-data\") pod \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\" (UID: \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\") " Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.595768 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nkgxp\" (UniqueName: \"kubernetes.io/projected/0c85c174-d9ef-4c9a-8afb-7071c90b5578-kube-api-access-nkgxp\") pod \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\" (UID: \"0c85c174-d9ef-4c9a-8afb-7071c90b5578\") " Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.596765 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffce645f-18bf-4182-8524-38af2bc17063-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.596793 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gj9k9\" (UniqueName: \"kubernetes.io/projected/ffce645f-18bf-4182-8524-38af2bc17063-kube-api-access-gj9k9\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.596807 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffce645f-18bf-4182-8524-38af2bc17063-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.601344 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c85c174-d9ef-4c9a-8afb-7071c90b5578-logs" (OuterVolumeSpecName: "logs") pod "0c85c174-d9ef-4c9a-8afb-7071c90b5578" (UID: "0c85c174-d9ef-4c9a-8afb-7071c90b5578"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.606581 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c85c174-d9ef-4c9a-8afb-7071c90b5578-kube-api-access-nkgxp" (OuterVolumeSpecName: "kube-api-access-nkgxp") pod "0c85c174-d9ef-4c9a-8afb-7071c90b5578" (UID: "0c85c174-d9ef-4c9a-8afb-7071c90b5578"). InnerVolumeSpecName "kube-api-access-nkgxp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.646238 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c85c174-d9ef-4c9a-8afb-7071c90b5578-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0c85c174-d9ef-4c9a-8afb-7071c90b5578" (UID: "0c85c174-d9ef-4c9a-8afb-7071c90b5578"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.659469 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c85c174-d9ef-4c9a-8afb-7071c90b5578-config-data" (OuterVolumeSpecName: "config-data") pod "0c85c174-d9ef-4c9a-8afb-7071c90b5578" (UID: "0c85c174-d9ef-4c9a-8afb-7071c90b5578"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.699357 4967 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c85c174-d9ef-4c9a-8afb-7071c90b5578-logs\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.699491 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c85c174-d9ef-4c9a-8afb-7071c90b5578-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.699506 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c85c174-d9ef-4c9a-8afb-7071c90b5578-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.699517 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nkgxp\" (UniqueName: \"kubernetes.io/projected/0c85c174-d9ef-4c9a-8afb-7071c90b5578-kube-api-access-nkgxp\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.766803 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.787845 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.801440 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qsjcj\" (UniqueName: \"kubernetes.io/projected/4f74f871-6a82-49a3-a9e7-a991a513027b-kube-api-access-qsjcj\") pod \"4f74f871-6a82-49a3-a9e7-a991a513027b\" (UID: \"4f74f871-6a82-49a3-a9e7-a991a513027b\") " Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.801537 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f74f871-6a82-49a3-a9e7-a991a513027b-scripts\") pod \"4f74f871-6a82-49a3-a9e7-a991a513027b\" (UID: \"4f74f871-6a82-49a3-a9e7-a991a513027b\") " Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.801564 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f74f871-6a82-49a3-a9e7-a991a513027b-config-data\") pod \"4f74f871-6a82-49a3-a9e7-a991a513027b\" (UID: \"4f74f871-6a82-49a3-a9e7-a991a513027b\") " Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.801756 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f74f871-6a82-49a3-a9e7-a991a513027b-combined-ca-bundle\") pod \"4f74f871-6a82-49a3-a9e7-a991a513027b\" (UID: \"4f74f871-6a82-49a3-a9e7-a991a513027b\") " Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.804253 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.818057 4967 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f74f871-6a82-49a3-a9e7-a991a513027b-kube-api-access-qsjcj" (OuterVolumeSpecName: "kube-api-access-qsjcj") pod "4f74f871-6a82-49a3-a9e7-a991a513027b" (UID: "4f74f871-6a82-49a3-a9e7-a991a513027b"). InnerVolumeSpecName "kube-api-access-qsjcj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.824637 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 15:59:36 crc kubenswrapper[4967]: E1121 15:59:36.825114 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerName="aodh-listener" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.825127 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerName="aodh-listener" Nov 21 15:59:36 crc kubenswrapper[4967]: E1121 15:59:36.825144 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c85c174-d9ef-4c9a-8afb-7071c90b5578" containerName="nova-metadata-log" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.825151 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c85c174-d9ef-4c9a-8afb-7071c90b5578" containerName="nova-metadata-log" Nov 21 15:59:36 crc kubenswrapper[4967]: E1121 15:59:36.825169 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c85c174-d9ef-4c9a-8afb-7071c90b5578" containerName="nova-metadata-metadata" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.825175 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c85c174-d9ef-4c9a-8afb-7071c90b5578" containerName="nova-metadata-metadata" Nov 21 15:59:36 crc kubenswrapper[4967]: E1121 15:59:36.825198 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerName="aodh-notifier" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.825204 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerName="aodh-notifier" Nov 21 15:59:36 crc kubenswrapper[4967]: E1121 15:59:36.825222 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffce645f-18bf-4182-8524-38af2bc17063" containerName="nova-cell1-novncproxy-novncproxy" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.825227 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffce645f-18bf-4182-8524-38af2bc17063" containerName="nova-cell1-novncproxy-novncproxy" Nov 21 15:59:36 crc kubenswrapper[4967]: E1121 15:59:36.825246 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerName="aodh-api" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.825252 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerName="aodh-api" Nov 21 15:59:36 crc kubenswrapper[4967]: E1121 15:59:36.825262 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerName="aodh-evaluator" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.825268 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerName="aodh-evaluator" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.825483 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffce645f-18bf-4182-8524-38af2bc17063" containerName="nova-cell1-novncproxy-novncproxy" Nov 
21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.825498 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c85c174-d9ef-4c9a-8afb-7071c90b5578" containerName="nova-metadata-metadata" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.825508 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerName="aodh-api" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.825519 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerName="aodh-listener" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.825533 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c85c174-d9ef-4c9a-8afb-7071c90b5578" containerName="nova-metadata-log" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.825546 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerName="aodh-evaluator" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.825561 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f74f871-6a82-49a3-a9e7-a991a513027b" containerName="aodh-notifier" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.826420 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.828921 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f74f871-6a82-49a3-a9e7-a991a513027b-scripts" (OuterVolumeSpecName: "scripts") pod "4f74f871-6a82-49a3-a9e7-a991a513027b" (UID: "4f74f871-6a82-49a3-a9e7-a991a513027b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.829662 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.829753 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.829796 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.857263 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.904773 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rp4p\" (UniqueName: \"kubernetes.io/projected/2e73aeef-13eb-4c55-9e06-bb56b49c9e5c-kube-api-access-9rp4p\") pod \"nova-cell1-novncproxy-0\" (UID: \"2e73aeef-13eb-4c55-9e06-bb56b49c9e5c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.905011 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e73aeef-13eb-4c55-9e06-bb56b49c9e5c-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2e73aeef-13eb-4c55-9e06-bb56b49c9e5c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.905045 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/2e73aeef-13eb-4c55-9e06-bb56b49c9e5c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2e73aeef-13eb-4c55-9e06-bb56b49c9e5c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.905210 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e73aeef-13eb-4c55-9e06-bb56b49c9e5c-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2e73aeef-13eb-4c55-9e06-bb56b49c9e5c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.905350 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e73aeef-13eb-4c55-9e06-bb56b49c9e5c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2e73aeef-13eb-4c55-9e06-bb56b49c9e5c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.905655 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qsjcj\" (UniqueName: \"kubernetes.io/projected/4f74f871-6a82-49a3-a9e7-a991a513027b-kube-api-access-qsjcj\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.905683 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f74f871-6a82-49a3-a9e7-a991a513027b-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:36 crc kubenswrapper[4967]: I1121 15:59:36.981609 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f74f871-6a82-49a3-a9e7-a991a513027b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4f74f871-6a82-49a3-a9e7-a991a513027b" (UID: "4f74f871-6a82-49a3-a9e7-a991a513027b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.007648 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rp4p\" (UniqueName: \"kubernetes.io/projected/2e73aeef-13eb-4c55-9e06-bb56b49c9e5c-kube-api-access-9rp4p\") pod \"nova-cell1-novncproxy-0\" (UID: \"2e73aeef-13eb-4c55-9e06-bb56b49c9e5c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.007793 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e73aeef-13eb-4c55-9e06-bb56b49c9e5c-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2e73aeef-13eb-4c55-9e06-bb56b49c9e5c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.007830 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e73aeef-13eb-4c55-9e06-bb56b49c9e5c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2e73aeef-13eb-4c55-9e06-bb56b49c9e5c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.007913 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e73aeef-13eb-4c55-9e06-bb56b49c9e5c-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2e73aeef-13eb-4c55-9e06-bb56b49c9e5c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.007994 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e73aeef-13eb-4c55-9e06-bb56b49c9e5c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2e73aeef-13eb-4c55-9e06-bb56b49c9e5c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.008104 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f74f871-6a82-49a3-a9e7-a991a513027b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.011893 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e73aeef-13eb-4c55-9e06-bb56b49c9e5c-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2e73aeef-13eb-4c55-9e06-bb56b49c9e5c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.014226 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e73aeef-13eb-4c55-9e06-bb56b49c9e5c-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2e73aeef-13eb-4c55-9e06-bb56b49c9e5c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.014606 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e73aeef-13eb-4c55-9e06-bb56b49c9e5c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2e73aeef-13eb-4c55-9e06-bb56b49c9e5c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.014618 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/2e73aeef-13eb-4c55-9e06-bb56b49c9e5c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2e73aeef-13eb-4c55-9e06-bb56b49c9e5c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.023567 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f74f871-6a82-49a3-a9e7-a991a513027b-config-data" (OuterVolumeSpecName: "config-data") pod "4f74f871-6a82-49a3-a9e7-a991a513027b" (UID: "4f74f871-6a82-49a3-a9e7-a991a513027b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.024274 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rp4p\" (UniqueName: \"kubernetes.io/projected/2e73aeef-13eb-4c55-9e06-bb56b49c9e5c-kube-api-access-9rp4p\") pod \"nova-cell1-novncproxy-0\" (UID: \"2e73aeef-13eb-4c55-9e06-bb56b49c9e5c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.110269 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f74f871-6a82-49a3-a9e7-a991a513027b-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.311473 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.463569 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"4f74f871-6a82-49a3-a9e7-a991a513027b","Type":"ContainerDied","Data":"853f1050ee1ff7b196c52533e1f0dc0a492fbf9ee5a16c45c2fec6fc5ca10d89"} Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.463606 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.463637 4967 scope.go:117] "RemoveContainer" containerID="1a961c45eaba440f4856c5acc727fc7f17824dd8a7966789ff4dae85765040a6" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.476566 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"df96ef36-2101-4339-b124-d6df69373e2b","Type":"ContainerStarted","Data":"7e2258c8109932dbead9411b32496fb8c5e67d63da73879ca03b450a14dda9c3"} Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.476725 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="df96ef36-2101-4339-b124-d6df69373e2b" containerName="ceilometer-central-agent" containerID="cri-o://65d1bf90843c75f050fc3d820c7983839e42e9988e857950c09c891f2d73a840" gracePeriod=30 Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.476986 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.477199 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="df96ef36-2101-4339-b124-d6df69373e2b" containerName="proxy-httpd" containerID="cri-o://7e2258c8109932dbead9411b32496fb8c5e67d63da73879ca03b450a14dda9c3" gracePeriod=30 Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.477287 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="df96ef36-2101-4339-b124-d6df69373e2b" containerName="sg-core" containerID="cri-o://fd0c102f8ffdc243ec5d9f3fb13eabd4cf5e9b9999173025251e1bcd052418de" gracePeriod=30 Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.477377 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="df96ef36-2101-4339-b124-d6df69373e2b" containerName="ceilometer-notification-agent" containerID="cri-o://502cc693daa70b71e2f428a31d8e522c294126ec19244e5be6e44bf6a1ff14c1" gracePeriod=30 Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.483487 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0c85c174-d9ef-4c9a-8afb-7071c90b5578","Type":"ContainerDied","Data":"29814839aa4ca581d3c5486ece5b655acb7168192a1935ddf0e8ba3a5e104af3"} Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.483858 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.500995 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.298508225 podStartE2EDuration="12.500978329s" podCreationTimestamp="2025-11-21 15:59:25 +0000 UTC" firstStartedPulling="2025-11-21 15:59:26.587458869 +0000 UTC m=+1454.845979877" lastFinishedPulling="2025-11-21 15:59:35.789928973 +0000 UTC m=+1464.048449981" observedRunningTime="2025-11-21 15:59:37.497696703 +0000 UTC m=+1465.756217701" watchObservedRunningTime="2025-11-21 15:59:37.500978329 +0000 UTC m=+1465.759499337" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.524562 4967 scope.go:117] "RemoveContainer" containerID="7e11cbb3057d6975e8df896d8fbe7291ea52ff0553f90dc377f996aa57edaa1d" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.577907 4967 scope.go:117] "RemoveContainer" containerID="5c94c4de764d7e09052b910d266c640a80428e60e7ecea0aca91e131d6b1129a" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.588344 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.623588 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.652808 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.691227 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.710071 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.713145 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.718226 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.718287 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.718486 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.718558 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-6bsgb" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.720032 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.720799 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.731954 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-combined-ca-bundle\") pod \"aodh-0\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") " pod="openstack/aodh-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.732071 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-internal-tls-certs\") pod \"aodh-0\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") " pod="openstack/aodh-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.732112 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-scripts\") pod \"aodh-0\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") " pod="openstack/aodh-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.732168 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-public-tls-certs\") pod \"aodh-0\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") " pod="openstack/aodh-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.732323 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccnfq\" (UniqueName: \"kubernetes.io/projected/52ed0309-117b-41b6-bbe8-dd345f306d79-kube-api-access-ccnfq\") pod \"aodh-0\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") " pod="openstack/aodh-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.733373 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-config-data\") pod \"aodh-0\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") " pod="openstack/aodh-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.734286 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.737643 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.741724 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.742046 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.742718 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.819060 4967 scope.go:117] "RemoveContainer" containerID="9128ac2fa213451d9486f8c34cb9a75e619c0341eb0fa0b5057dde6062413a33" Nov 21 15:59:37 crc kubenswrapper[4967]: W1121 15:59:37.820090 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2e73aeef_13eb_4c55_9e06_bb56b49c9e5c.slice/crio-b0ef910931be40214cdb7cd02e56259ef1412826bf14aae0b29c16b1fcc42c6b WatchSource:0}: Error finding container b0ef910931be40214cdb7cd02e56259ef1412826bf14aae0b29c16b1fcc42c6b: Status 404 returned error can't find the container with id b0ef910931be40214cdb7cd02e56259ef1412826bf14aae0b29c16b1fcc42c6b Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.828974 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.837066 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lls4n\" (UniqueName: \"kubernetes.io/projected/ba8ea3e3-1dbe-4067-84bc-d8150d317245-kube-api-access-lls4n\") pod \"nova-metadata-0\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " pod="openstack/nova-metadata-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.837403 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-internal-tls-certs\") pod \"aodh-0\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") " pod="openstack/aodh-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.837437 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-scripts\") pod \"aodh-0\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") " pod="openstack/aodh-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.837479 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-public-tls-certs\") pod \"aodh-0\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") " pod="openstack/aodh-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.837562 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccnfq\" (UniqueName: \"kubernetes.io/projected/52ed0309-117b-41b6-bbe8-dd345f306d79-kube-api-access-ccnfq\") pod \"aodh-0\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") " pod="openstack/aodh-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.837600 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba8ea3e3-1dbe-4067-84bc-d8150d317245-config-data\") pod \"nova-metadata-0\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " 
pod="openstack/nova-metadata-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.837678 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba8ea3e3-1dbe-4067-84bc-d8150d317245-logs\") pod \"nova-metadata-0\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " pod="openstack/nova-metadata-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.837698 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-config-data\") pod \"aodh-0\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") " pod="openstack/aodh-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.837740 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba8ea3e3-1dbe-4067-84bc-d8150d317245-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " pod="openstack/nova-metadata-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.837766 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-combined-ca-bundle\") pod \"aodh-0\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") " pod="openstack/aodh-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.837793 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba8ea3e3-1dbe-4067-84bc-d8150d317245-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " pod="openstack/nova-metadata-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.845641 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-combined-ca-bundle\") pod \"aodh-0\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") " pod="openstack/aodh-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.845912 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-internal-tls-certs\") pod \"aodh-0\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") " pod="openstack/aodh-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.848360 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-config-data\") pod \"aodh-0\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") " pod="openstack/aodh-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.859972 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-public-tls-certs\") pod \"aodh-0\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") " pod="openstack/aodh-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.860061 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-scripts\") pod \"aodh-0\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") " pod="openstack/aodh-0" Nov 21 15:59:37 crc 
kubenswrapper[4967]: I1121 15:59:37.864551 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ccnfq\" (UniqueName: \"kubernetes.io/projected/52ed0309-117b-41b6-bbe8-dd345f306d79-kube-api-access-ccnfq\") pod \"aodh-0\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") " pod="openstack/aodh-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.939202 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba8ea3e3-1dbe-4067-84bc-d8150d317245-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " pod="openstack/nova-metadata-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.939253 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lls4n\" (UniqueName: \"kubernetes.io/projected/ba8ea3e3-1dbe-4067-84bc-d8150d317245-kube-api-access-lls4n\") pod \"nova-metadata-0\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " pod="openstack/nova-metadata-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.939440 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba8ea3e3-1dbe-4067-84bc-d8150d317245-config-data\") pod \"nova-metadata-0\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " pod="openstack/nova-metadata-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.939481 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba8ea3e3-1dbe-4067-84bc-d8150d317245-logs\") pod \"nova-metadata-0\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " pod="openstack/nova-metadata-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.939531 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba8ea3e3-1dbe-4067-84bc-d8150d317245-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " pod="openstack/nova-metadata-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.940105 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba8ea3e3-1dbe-4067-84bc-d8150d317245-logs\") pod \"nova-metadata-0\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " pod="openstack/nova-metadata-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.946074 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba8ea3e3-1dbe-4067-84bc-d8150d317245-config-data\") pod \"nova-metadata-0\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " pod="openstack/nova-metadata-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.946853 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba8ea3e3-1dbe-4067-84bc-d8150d317245-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " pod="openstack/nova-metadata-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.950825 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba8ea3e3-1dbe-4067-84bc-d8150d317245-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " 
pod="openstack/nova-metadata-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.960252 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lls4n\" (UniqueName: \"kubernetes.io/projected/ba8ea3e3-1dbe-4067-84bc-d8150d317245-kube-api-access-lls4n\") pod \"nova-metadata-0\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " pod="openstack/nova-metadata-0" Nov 21 15:59:37 crc kubenswrapper[4967]: I1121 15:59:37.969045 4967 scope.go:117] "RemoveContainer" containerID="1e5741d399e6cdd6f295470a873c05f0c3922675e1e9bc8ab0babede1af7d6e4" Nov 21 15:59:38 crc kubenswrapper[4967]: I1121 15:59:38.018037 4967 scope.go:117] "RemoveContainer" containerID="23d64f268e8f784e593190234efc55f15f7a62b253f0edf99d7b774e9c66045d" Nov 21 15:59:38 crc kubenswrapper[4967]: I1121 15:59:38.117531 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 21 15:59:38 crc kubenswrapper[4967]: I1121 15:59:38.245040 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 15:59:38 crc kubenswrapper[4967]: I1121 15:59:38.501868 4967 generic.go:334] "Generic (PLEG): container finished" podID="df96ef36-2101-4339-b124-d6df69373e2b" containerID="7e2258c8109932dbead9411b32496fb8c5e67d63da73879ca03b450a14dda9c3" exitCode=0 Nov 21 15:59:38 crc kubenswrapper[4967]: I1121 15:59:38.502491 4967 generic.go:334] "Generic (PLEG): container finished" podID="df96ef36-2101-4339-b124-d6df69373e2b" containerID="fd0c102f8ffdc243ec5d9f3fb13eabd4cf5e9b9999173025251e1bcd052418de" exitCode=2 Nov 21 15:59:38 crc kubenswrapper[4967]: I1121 15:59:38.502508 4967 generic.go:334] "Generic (PLEG): container finished" podID="df96ef36-2101-4339-b124-d6df69373e2b" containerID="502cc693daa70b71e2f428a31d8e522c294126ec19244e5be6e44bf6a1ff14c1" exitCode=0 Nov 21 15:59:38 crc kubenswrapper[4967]: I1121 15:59:38.502518 4967 generic.go:334] "Generic (PLEG): container finished" podID="df96ef36-2101-4339-b124-d6df69373e2b" containerID="65d1bf90843c75f050fc3d820c7983839e42e9988e857950c09c891f2d73a840" exitCode=0 Nov 21 15:59:38 crc kubenswrapper[4967]: I1121 15:59:38.501922 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"df96ef36-2101-4339-b124-d6df69373e2b","Type":"ContainerDied","Data":"7e2258c8109932dbead9411b32496fb8c5e67d63da73879ca03b450a14dda9c3"} Nov 21 15:59:38 crc kubenswrapper[4967]: I1121 15:59:38.502605 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"df96ef36-2101-4339-b124-d6df69373e2b","Type":"ContainerDied","Data":"fd0c102f8ffdc243ec5d9f3fb13eabd4cf5e9b9999173025251e1bcd052418de"} Nov 21 15:59:38 crc kubenswrapper[4967]: I1121 15:59:38.502637 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"df96ef36-2101-4339-b124-d6df69373e2b","Type":"ContainerDied","Data":"502cc693daa70b71e2f428a31d8e522c294126ec19244e5be6e44bf6a1ff14c1"} Nov 21 15:59:38 crc kubenswrapper[4967]: I1121 15:59:38.502661 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"df96ef36-2101-4339-b124-d6df69373e2b","Type":"ContainerDied","Data":"65d1bf90843c75f050fc3d820c7983839e42e9988e857950c09c891f2d73a840"} Nov 21 15:59:38 crc kubenswrapper[4967]: I1121 15:59:38.506504 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" 
event={"ID":"2e73aeef-13eb-4c55-9e06-bb56b49c9e5c","Type":"ContainerStarted","Data":"f0bb2c09fc456b4fa5c83eebf7387bd4f8cebb7ef0846644f41a62f3d5a371b7"} Nov 21 15:59:38 crc kubenswrapper[4967]: I1121 15:59:38.506564 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2e73aeef-13eb-4c55-9e06-bb56b49c9e5c","Type":"ContainerStarted","Data":"b0ef910931be40214cdb7cd02e56259ef1412826bf14aae0b29c16b1fcc42c6b"} Nov 21 15:59:38 crc kubenswrapper[4967]: I1121 15:59:38.547582 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.547559734 podStartE2EDuration="2.547559734s" podCreationTimestamp="2025-11-21 15:59:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:59:38.530474492 +0000 UTC m=+1466.788995500" watchObservedRunningTime="2025-11-21 15:59:38.547559734 +0000 UTC m=+1466.806080742" Nov 21 15:59:38 crc kubenswrapper[4967]: I1121 15:59:38.591491 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c85c174-d9ef-4c9a-8afb-7071c90b5578" path="/var/lib/kubelet/pods/0c85c174-d9ef-4c9a-8afb-7071c90b5578/volumes" Nov 21 15:59:38 crc kubenswrapper[4967]: I1121 15:59:38.615104 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f74f871-6a82-49a3-a9e7-a991a513027b" path="/var/lib/kubelet/pods/4f74f871-6a82-49a3-a9e7-a991a513027b/volumes" Nov 21 15:59:38 crc kubenswrapper[4967]: I1121 15:59:38.617427 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffce645f-18bf-4182-8524-38af2bc17063" path="/var/lib/kubelet/pods/ffce645f-18bf-4182-8524-38af2bc17063/volumes" Nov 21 15:59:38 crc kubenswrapper[4967]: I1121 15:59:38.695768 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 21 15:59:38 crc kubenswrapper[4967]: I1121 15:59:38.906701 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 15:59:38 crc kubenswrapper[4967]: W1121 15:59:38.908755 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podba8ea3e3_1dbe_4067_84bc_d8150d317245.slice/crio-5ad77989c284faa7c3df466d5649d8df242daa7b0c3b273776c3e31216cf1af2 WatchSource:0}: Error finding container 5ad77989c284faa7c3df466d5649d8df242daa7b0c3b273776c3e31216cf1af2: Status 404 returned error can't find the container with id 5ad77989c284faa7c3df466d5649d8df242daa7b0c3b273776c3e31216cf1af2 Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.117715 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.185258 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-config-data\") pod \"df96ef36-2101-4339-b124-d6df69373e2b\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.185454 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvv9z\" (UniqueName: \"kubernetes.io/projected/df96ef36-2101-4339-b124-d6df69373e2b-kube-api-access-zvv9z\") pod \"df96ef36-2101-4339-b124-d6df69373e2b\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.185505 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-combined-ca-bundle\") pod \"df96ef36-2101-4339-b124-d6df69373e2b\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.185576 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-sg-core-conf-yaml\") pod \"df96ef36-2101-4339-b124-d6df69373e2b\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.185622 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/df96ef36-2101-4339-b124-d6df69373e2b-log-httpd\") pod \"df96ef36-2101-4339-b124-d6df69373e2b\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.185657 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-ceilometer-tls-certs\") pod \"df96ef36-2101-4339-b124-d6df69373e2b\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.185908 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-scripts\") pod \"df96ef36-2101-4339-b124-d6df69373e2b\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.185952 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/df96ef36-2101-4339-b124-d6df69373e2b-run-httpd\") pod \"df96ef36-2101-4339-b124-d6df69373e2b\" (UID: \"df96ef36-2101-4339-b124-d6df69373e2b\") " Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.187097 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df96ef36-2101-4339-b124-d6df69373e2b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "df96ef36-2101-4339-b124-d6df69373e2b" (UID: "df96ef36-2101-4339-b124-d6df69373e2b"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.188261 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df96ef36-2101-4339-b124-d6df69373e2b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "df96ef36-2101-4339-b124-d6df69373e2b" (UID: "df96ef36-2101-4339-b124-d6df69373e2b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.196792 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df96ef36-2101-4339-b124-d6df69373e2b-kube-api-access-zvv9z" (OuterVolumeSpecName: "kube-api-access-zvv9z") pod "df96ef36-2101-4339-b124-d6df69373e2b" (UID: "df96ef36-2101-4339-b124-d6df69373e2b"). InnerVolumeSpecName "kube-api-access-zvv9z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.206587 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-scripts" (OuterVolumeSpecName: "scripts") pod "df96ef36-2101-4339-b124-d6df69373e2b" (UID: "df96ef36-2101-4339-b124-d6df69373e2b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.288856 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvv9z\" (UniqueName: \"kubernetes.io/projected/df96ef36-2101-4339-b124-d6df69373e2b-kube-api-access-zvv9z\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.288887 4967 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/df96ef36-2101-4339-b124-d6df69373e2b-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.288899 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.288910 4967 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/df96ef36-2101-4339-b124-d6df69373e2b-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.315390 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.322638 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "df96ef36-2101-4339-b124-d6df69373e2b" (UID: "df96ef36-2101-4339-b124-d6df69373e2b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.371507 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "df96ef36-2101-4339-b124-d6df69373e2b" (UID: "df96ef36-2101-4339-b124-d6df69373e2b"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.391746 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-combined-ca-bundle\") pod \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\" (UID: \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\") " Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.391908 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-config-data\") pod \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\" (UID: \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\") " Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.391961 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dwzz\" (UniqueName: \"kubernetes.io/projected/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-kube-api-access-9dwzz\") pod \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\" (UID: \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\") " Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.392037 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-logs\") pod \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\" (UID: \"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288\") " Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.392853 4967 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.392869 4967 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.393520 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-logs" (OuterVolumeSpecName: "logs") pod "57790c9e-cc4b-41f4-aaa5-8c8b0ee09288" (UID: "57790c9e-cc4b-41f4-aaa5-8c8b0ee09288"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.419848 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-kube-api-access-9dwzz" (OuterVolumeSpecName: "kube-api-access-9dwzz") pod "57790c9e-cc4b-41f4-aaa5-8c8b0ee09288" (UID: "57790c9e-cc4b-41f4-aaa5-8c8b0ee09288"). InnerVolumeSpecName "kube-api-access-9dwzz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.475227 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "df96ef36-2101-4339-b124-d6df69373e2b" (UID: "df96ef36-2101-4339-b124-d6df69373e2b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.490862 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "57790c9e-cc4b-41f4-aaa5-8c8b0ee09288" (UID: "57790c9e-cc4b-41f4-aaa5-8c8b0ee09288"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.494923 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dwzz\" (UniqueName: \"kubernetes.io/projected/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-kube-api-access-9dwzz\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.494964 4967 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-logs\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.494977 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.494987 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.504593 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-config-data" (OuterVolumeSpecName: "config-data") pod "57790c9e-cc4b-41f4-aaa5-8c8b0ee09288" (UID: "57790c9e-cc4b-41f4-aaa5-8c8b0ee09288"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.547456 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ba8ea3e3-1dbe-4067-84bc-d8150d317245","Type":"ContainerStarted","Data":"b6ce8922feb2be49e6d43ef28dc9f4d335e51b3e03aa42c925c35ba785026195"} Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.547600 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ba8ea3e3-1dbe-4067-84bc-d8150d317245","Type":"ContainerStarted","Data":"5ad77989c284faa7c3df466d5649d8df242daa7b0c3b273776c3e31216cf1af2"} Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.549500 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-config-data" (OuterVolumeSpecName: "config-data") pod "df96ef36-2101-4339-b124-d6df69373e2b" (UID: "df96ef36-2101-4339-b124-d6df69373e2b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.561599 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"df96ef36-2101-4339-b124-d6df69373e2b","Type":"ContainerDied","Data":"9f1810f1a9fab4ffcfaaf610c06cf1e1923ab3196d7c68313a6146e7db0b7aa1"} Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.561647 4967 scope.go:117] "RemoveContainer" containerID="7e2258c8109932dbead9411b32496fb8c5e67d63da73879ca03b450a14dda9c3" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.561863 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.568704 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"52ed0309-117b-41b6-bbe8-dd345f306d79","Type":"ContainerStarted","Data":"0420700ff345951a107280a216a7f8d07648b93b509819974f7a30249522b709"} Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.574081 4967 generic.go:334] "Generic (PLEG): container finished" podID="57790c9e-cc4b-41f4-aaa5-8c8b0ee09288" containerID="8457305cb1421c4fd494565ba87a1cc04a023299873fb13766968f4253d44fc6" exitCode=0 Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.574746 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.575204 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288","Type":"ContainerDied","Data":"8457305cb1421c4fd494565ba87a1cc04a023299873fb13766968f4253d44fc6"} Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.575286 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"57790c9e-cc4b-41f4-aaa5-8c8b0ee09288","Type":"ContainerDied","Data":"2437e6f36c401b40dbdbd24188c024e027e88eb250237a9683d847d1bebe40cf"} Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.593262 4967 scope.go:117] "RemoveContainer" containerID="fd0c102f8ffdc243ec5d9f3fb13eabd4cf5e9b9999173025251e1bcd052418de" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.600137 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.600161 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df96ef36-2101-4339-b124-d6df69373e2b-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.741027 4967 scope.go:117] "RemoveContainer" containerID="502cc693daa70b71e2f428a31d8e522c294126ec19244e5be6e44bf6a1ff14c1" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.766006 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.786400 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.787729 4967 scope.go:117] "RemoveContainer" containerID="65d1bf90843c75f050fc3d820c7983839e42e9988e857950c09c891f2d73a840" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.821088 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 
15:59:39.833865 4967 scope.go:117] "RemoveContainer" containerID="8457305cb1421c4fd494565ba87a1cc04a023299873fb13766968f4253d44fc6" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.859750 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.884367 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 21 15:59:39 crc kubenswrapper[4967]: E1121 15:59:39.884932 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df96ef36-2101-4339-b124-d6df69373e2b" containerName="ceilometer-central-agent" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.884952 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="df96ef36-2101-4339-b124-d6df69373e2b" containerName="ceilometer-central-agent" Nov 21 15:59:39 crc kubenswrapper[4967]: E1121 15:59:39.884967 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57790c9e-cc4b-41f4-aaa5-8c8b0ee09288" containerName="nova-api-log" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.884975 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="57790c9e-cc4b-41f4-aaa5-8c8b0ee09288" containerName="nova-api-log" Nov 21 15:59:39 crc kubenswrapper[4967]: E1121 15:59:39.885008 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df96ef36-2101-4339-b124-d6df69373e2b" containerName="sg-core" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.885016 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="df96ef36-2101-4339-b124-d6df69373e2b" containerName="sg-core" Nov 21 15:59:39 crc kubenswrapper[4967]: E1121 15:59:39.885051 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df96ef36-2101-4339-b124-d6df69373e2b" containerName="ceilometer-notification-agent" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.885059 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="df96ef36-2101-4339-b124-d6df69373e2b" containerName="ceilometer-notification-agent" Nov 21 15:59:39 crc kubenswrapper[4967]: E1121 15:59:39.885077 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df96ef36-2101-4339-b124-d6df69373e2b" containerName="proxy-httpd" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.885082 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="df96ef36-2101-4339-b124-d6df69373e2b" containerName="proxy-httpd" Nov 21 15:59:39 crc kubenswrapper[4967]: E1121 15:59:39.885102 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57790c9e-cc4b-41f4-aaa5-8c8b0ee09288" containerName="nova-api-api" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.885109 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="57790c9e-cc4b-41f4-aaa5-8c8b0ee09288" containerName="nova-api-api" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.885412 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="df96ef36-2101-4339-b124-d6df69373e2b" containerName="ceilometer-central-agent" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.885436 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="df96ef36-2101-4339-b124-d6df69373e2b" containerName="ceilometer-notification-agent" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.885454 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="df96ef36-2101-4339-b124-d6df69373e2b" containerName="proxy-httpd" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.885476 4967 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="57790c9e-cc4b-41f4-aaa5-8c8b0ee09288" containerName="nova-api-log" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.885497 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="df96ef36-2101-4339-b124-d6df69373e2b" containerName="sg-core" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.885508 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="57790c9e-cc4b-41f4-aaa5-8c8b0ee09288" containerName="nova-api-api" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.887155 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.901710 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.901899 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.902010 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.910331 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fc6a5faf-5d84-46d7-929c-3acbef695c05-logs\") pod \"nova-api-0\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " pod="openstack/nova-api-0" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.910459 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-internal-tls-certs\") pod \"nova-api-0\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " pod="openstack/nova-api-0" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.910490 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhblm\" (UniqueName: \"kubernetes.io/projected/fc6a5faf-5d84-46d7-929c-3acbef695c05-kube-api-access-mhblm\") pod \"nova-api-0\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " pod="openstack/nova-api-0" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.910512 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " pod="openstack/nova-api-0" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.910624 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-config-data\") pod \"nova-api-0\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " pod="openstack/nova-api-0" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.910676 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-public-tls-certs\") pod \"nova-api-0\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " pod="openstack/nova-api-0" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.937378 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 15:59:39 crc 
kubenswrapper[4967]: I1121 15:59:39.948252 4967 scope.go:117] "RemoveContainer" containerID="e5c6f6432b4d0de7a38e811ceea976a8a6b1812032e792816b4364a82331569e" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.959503 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.964514 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.967723 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.968987 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.969542 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.977513 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.997251 4967 scope.go:117] "RemoveContainer" containerID="8457305cb1421c4fd494565ba87a1cc04a023299873fb13766968f4253d44fc6" Nov 21 15:59:39 crc kubenswrapper[4967]: E1121 15:59:39.998085 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8457305cb1421c4fd494565ba87a1cc04a023299873fb13766968f4253d44fc6\": container with ID starting with 8457305cb1421c4fd494565ba87a1cc04a023299873fb13766968f4253d44fc6 not found: ID does not exist" containerID="8457305cb1421c4fd494565ba87a1cc04a023299873fb13766968f4253d44fc6" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.998134 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8457305cb1421c4fd494565ba87a1cc04a023299873fb13766968f4253d44fc6"} err="failed to get container status \"8457305cb1421c4fd494565ba87a1cc04a023299873fb13766968f4253d44fc6\": rpc error: code = NotFound desc = could not find container \"8457305cb1421c4fd494565ba87a1cc04a023299873fb13766968f4253d44fc6\": container with ID starting with 8457305cb1421c4fd494565ba87a1cc04a023299873fb13766968f4253d44fc6 not found: ID does not exist" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.998174 4967 scope.go:117] "RemoveContainer" containerID="e5c6f6432b4d0de7a38e811ceea976a8a6b1812032e792816b4364a82331569e" Nov 21 15:59:39 crc kubenswrapper[4967]: E1121 15:59:39.998549 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5c6f6432b4d0de7a38e811ceea976a8a6b1812032e792816b4364a82331569e\": container with ID starting with e5c6f6432b4d0de7a38e811ceea976a8a6b1812032e792816b4364a82331569e not found: ID does not exist" containerID="e5c6f6432b4d0de7a38e811ceea976a8a6b1812032e792816b4364a82331569e" Nov 21 15:59:39 crc kubenswrapper[4967]: I1121 15:59:39.998617 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5c6f6432b4d0de7a38e811ceea976a8a6b1812032e792816b4364a82331569e"} err="failed to get container status \"e5c6f6432b4d0de7a38e811ceea976a8a6b1812032e792816b4364a82331569e\": rpc error: code = NotFound desc = could not find container \"e5c6f6432b4d0de7a38e811ceea976a8a6b1812032e792816b4364a82331569e\": container with ID starting with 
e5c6f6432b4d0de7a38e811ceea976a8a6b1812032e792816b4364a82331569e not found: ID does not exist" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.013024 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de37571f-76bd-4e9b-9141-cf2c056bab84-log-httpd\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.013148 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-config-data\") pod \"nova-api-0\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " pod="openstack/nova-api-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.013210 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.013257 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-public-tls-certs\") pod \"nova-api-0\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " pod="openstack/nova-api-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.013295 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.013482 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-config-data\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.013514 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vth8c\" (UniqueName: \"kubernetes.io/projected/de37571f-76bd-4e9b-9141-cf2c056bab84-kube-api-access-vth8c\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.013545 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fc6a5faf-5d84-46d7-929c-3acbef695c05-logs\") pod \"nova-api-0\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " pod="openstack/nova-api-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.013575 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.013625 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de37571f-76bd-4e9b-9141-cf2c056bab84-run-httpd\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.013687 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-scripts\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.013752 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-internal-tls-certs\") pod \"nova-api-0\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " pod="openstack/nova-api-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.013793 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhblm\" (UniqueName: \"kubernetes.io/projected/fc6a5faf-5d84-46d7-929c-3acbef695c05-kube-api-access-mhblm\") pod \"nova-api-0\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " pod="openstack/nova-api-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.013821 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " pod="openstack/nova-api-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.014607 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fc6a5faf-5d84-46d7-929c-3acbef695c05-logs\") pod \"nova-api-0\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " pod="openstack/nova-api-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.020481 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-internal-tls-certs\") pod \"nova-api-0\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " pod="openstack/nova-api-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.029055 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-config-data\") pod \"nova-api-0\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " pod="openstack/nova-api-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.029480 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-public-tls-certs\") pod \"nova-api-0\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " pod="openstack/nova-api-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.029502 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " pod="openstack/nova-api-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.034209 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhblm\" (UniqueName: 
\"kubernetes.io/projected/fc6a5faf-5d84-46d7-929c-3acbef695c05-kube-api-access-mhblm\") pod \"nova-api-0\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " pod="openstack/nova-api-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.117830 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-config-data\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.117883 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vth8c\" (UniqueName: \"kubernetes.io/projected/de37571f-76bd-4e9b-9141-cf2c056bab84-kube-api-access-vth8c\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.117909 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.117939 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de37571f-76bd-4e9b-9141-cf2c056bab84-run-httpd\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.117978 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-scripts\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.118054 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de37571f-76bd-4e9b-9141-cf2c056bab84-log-httpd\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.118084 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.118110 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.118749 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de37571f-76bd-4e9b-9141-cf2c056bab84-log-httpd\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.118964 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/de37571f-76bd-4e9b-9141-cf2c056bab84-run-httpd\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.123935 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.126375 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-config-data\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.128871 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.131000 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.131646 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-scripts\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.140614 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vth8c\" (UniqueName: \"kubernetes.io/projected/de37571f-76bd-4e9b-9141-cf2c056bab84-kube-api-access-vth8c\") pod \"ceilometer-0\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.255594 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.292463 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.563699 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57790c9e-cc4b-41f4-aaa5-8c8b0ee09288" path="/var/lib/kubelet/pods/57790c9e-cc4b-41f4-aaa5-8c8b0ee09288/volumes" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.574025 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df96ef36-2101-4339-b124-d6df69373e2b" path="/var/lib/kubelet/pods/df96ef36-2101-4339-b124-d6df69373e2b/volumes" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.613523 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ba8ea3e3-1dbe-4067-84bc-d8150d317245","Type":"ContainerStarted","Data":"e0fcd1c44a2b65ec35a57a1a20096e11ad803f5f6fc5055cd69d42be36af1e9a"} Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.621932 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"52ed0309-117b-41b6-bbe8-dd345f306d79","Type":"ContainerStarted","Data":"8fd455a14d307aaf1106040fbc7532d3bb3c2da66620b86b63286f6c678d6184"} Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.622129 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"52ed0309-117b-41b6-bbe8-dd345f306d79","Type":"ContainerStarted","Data":"e9672fac08d1234a417c9e8ea5490050c3a0456fa4427eabb7ad5e53de328380"} Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.662518 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.662496714 podStartE2EDuration="3.662496714s" podCreationTimestamp="2025-11-21 15:59:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:59:40.632235504 +0000 UTC m=+1468.890756522" watchObservedRunningTime="2025-11-21 15:59:40.662496714 +0000 UTC m=+1468.921017722" Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.803612 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 15:59:40 crc kubenswrapper[4967]: W1121 15:59:40.806980 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfc6a5faf_5d84_46d7_929c_3acbef695c05.slice/crio-e7312a1a73db8210b1563ba5d756b30010eb732df8162b4e64424d89355e4765 WatchSource:0}: Error finding container e7312a1a73db8210b1563ba5d756b30010eb732df8162b4e64424d89355e4765: Status 404 returned error can't find the container with id e7312a1a73db8210b1563ba5d756b30010eb732df8162b4e64424d89355e4765 Nov 21 15:59:40 crc kubenswrapper[4967]: I1121 15:59:40.926414 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 15:59:40 crc kubenswrapper[4967]: W1121 15:59:40.935129 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podde37571f_76bd_4e9b_9141_cf2c056bab84.slice/crio-cb62ba1d8ce6af2a850e82974092886deb35a115a37b9b4685856fbedd1049ef WatchSource:0}: Error finding container cb62ba1d8ce6af2a850e82974092886deb35a115a37b9b4685856fbedd1049ef: Status 404 returned error can't find the container with id cb62ba1d8ce6af2a850e82974092886deb35a115a37b9b4685856fbedd1049ef Nov 21 15:59:41 crc kubenswrapper[4967]: I1121 15:59:41.641822 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"de37571f-76bd-4e9b-9141-cf2c056bab84","Type":"ContainerStarted","Data":"cb62ba1d8ce6af2a850e82974092886deb35a115a37b9b4685856fbedd1049ef"} Nov 21 15:59:41 crc kubenswrapper[4967]: I1121 15:59:41.646303 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fc6a5faf-5d84-46d7-929c-3acbef695c05","Type":"ContainerStarted","Data":"c6ad87cde40081fed81919b409e55977f01e1ca0bd2ceead042c856d0108613b"} Nov 21 15:59:41 crc kubenswrapper[4967]: I1121 15:59:41.646477 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fc6a5faf-5d84-46d7-929c-3acbef695c05","Type":"ContainerStarted","Data":"4376347914d2f13416d98010597aa419307e55bb56b84b27d25fb1c44d8d0949"} Nov 21 15:59:41 crc kubenswrapper[4967]: I1121 15:59:41.646494 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fc6a5faf-5d84-46d7-929c-3acbef695c05","Type":"ContainerStarted","Data":"e7312a1a73db8210b1563ba5d756b30010eb732df8162b4e64424d89355e4765"} Nov 21 15:59:41 crc kubenswrapper[4967]: I1121 15:59:41.649903 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"52ed0309-117b-41b6-bbe8-dd345f306d79","Type":"ContainerStarted","Data":"da2b664446ba266ce5c348a07faad926f0043b1e13a753c3231fd9817093357a"} Nov 21 15:59:41 crc kubenswrapper[4967]: I1121 15:59:41.678039 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.678018356 podStartE2EDuration="2.678018356s" podCreationTimestamp="2025-11-21 15:59:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:59:41.669709942 +0000 UTC m=+1469.928230960" watchObservedRunningTime="2025-11-21 15:59:41.678018356 +0000 UTC m=+1469.936539364" Nov 21 15:59:42 crc kubenswrapper[4967]: I1121 15:59:42.312554 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:42 crc kubenswrapper[4967]: I1121 15:59:42.866553 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4" Nov 21 15:59:42 crc kubenswrapper[4967]: I1121 15:59:42.948689 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-mmfpv"] Nov 21 15:59:42 crc kubenswrapper[4967]: I1121 15:59:42.948940 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" podUID="7c75d46e-d31e-4505-ba3b-88d50d9bf5dc" containerName="dnsmasq-dns" containerID="cri-o://08d586ead6747c4d5608371fd684665888e0b4eb12b61b2ed59f0a170ff1f5a7" gracePeriod=10 Nov 21 15:59:43 crc kubenswrapper[4967]: I1121 15:59:43.245273 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 21 15:59:43 crc kubenswrapper[4967]: I1121 15:59:43.245341 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 21 15:59:43 crc kubenswrapper[4967]: I1121 15:59:43.358769 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" podUID="7c75d46e-d31e-4505-ba3b-88d50d9bf5dc" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.245:5353: connect: connection refused" Nov 21 15:59:43 crc kubenswrapper[4967]: I1121 15:59:43.671884 4967 generic.go:334] "Generic (PLEG): container finished" 
podID="7c75d46e-d31e-4505-ba3b-88d50d9bf5dc" containerID="08d586ead6747c4d5608371fd684665888e0b4eb12b61b2ed59f0a170ff1f5a7" exitCode=0 Nov 21 15:59:43 crc kubenswrapper[4967]: I1121 15:59:43.671959 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" event={"ID":"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc","Type":"ContainerDied","Data":"08d586ead6747c4d5608371fd684665888e0b4eb12b61b2ed59f0a170ff1f5a7"} Nov 21 15:59:43 crc kubenswrapper[4967]: I1121 15:59:43.674974 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"52ed0309-117b-41b6-bbe8-dd345f306d79","Type":"ContainerStarted","Data":"4aef45e6425d707959a01717a237cf6880a470be4b876915a9fb2e2b178ecb31"} Nov 21 15:59:43 crc kubenswrapper[4967]: I1121 15:59:43.706822 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.905975866 podStartE2EDuration="6.706796824s" podCreationTimestamp="2025-11-21 15:59:37 +0000 UTC" firstStartedPulling="2025-11-21 15:59:38.687043095 +0000 UTC m=+1466.945564093" lastFinishedPulling="2025-11-21 15:59:42.487864043 +0000 UTC m=+1470.746385051" observedRunningTime="2025-11-21 15:59:43.6978099 +0000 UTC m=+1471.956330908" watchObservedRunningTime="2025-11-21 15:59:43.706796824 +0000 UTC m=+1471.965317832" Nov 21 15:59:44 crc kubenswrapper[4967]: I1121 15:59:44.689097 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de37571f-76bd-4e9b-9141-cf2c056bab84","Type":"ContainerStarted","Data":"52a72174fca64a58a58e14f6ef18e868cc0cec3a48d2df09a6a1d1a3c06c2923"} Nov 21 15:59:44 crc kubenswrapper[4967]: I1121 15:59:44.690966 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" event={"ID":"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc","Type":"ContainerDied","Data":"bc66b36e6ad60f3f2006e4f8d8c9be6de7d7ee986998a13153be81961609d219"} Nov 21 15:59:44 crc kubenswrapper[4967]: I1121 15:59:44.691010 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc66b36e6ad60f3f2006e4f8d8c9be6de7d7ee986998a13153be81961609d219" Nov 21 15:59:44 crc kubenswrapper[4967]: I1121 15:59:44.810813 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:59:44 crc kubenswrapper[4967]: I1121 15:59:44.837456 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-ovsdbserver-nb\") pod \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " Nov 21 15:59:44 crc kubenswrapper[4967]: I1121 15:59:44.837910 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-dns-swift-storage-0\") pod \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " Nov 21 15:59:44 crc kubenswrapper[4967]: I1121 15:59:44.838119 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-ovsdbserver-sb\") pod \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " Nov 21 15:59:44 crc kubenswrapper[4967]: I1121 15:59:44.838196 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zksr\" (UniqueName: \"kubernetes.io/projected/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-kube-api-access-7zksr\") pod \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " Nov 21 15:59:44 crc kubenswrapper[4967]: I1121 15:59:44.838227 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-dns-svc\") pod \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " Nov 21 15:59:44 crc kubenswrapper[4967]: I1121 15:59:44.838349 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-config\") pod \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\" (UID: \"7c75d46e-d31e-4505-ba3b-88d50d9bf5dc\") " Nov 21 15:59:44 crc kubenswrapper[4967]: I1121 15:59:44.874962 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-kube-api-access-7zksr" (OuterVolumeSpecName: "kube-api-access-7zksr") pod "7c75d46e-d31e-4505-ba3b-88d50d9bf5dc" (UID: "7c75d46e-d31e-4505-ba3b-88d50d9bf5dc"). InnerVolumeSpecName "kube-api-access-7zksr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:59:44 crc kubenswrapper[4967]: I1121 15:59:44.941005 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zksr\" (UniqueName: \"kubernetes.io/projected/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-kube-api-access-7zksr\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:44 crc kubenswrapper[4967]: I1121 15:59:44.960182 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "7c75d46e-d31e-4505-ba3b-88d50d9bf5dc" (UID: "7c75d46e-d31e-4505-ba3b-88d50d9bf5dc"). InnerVolumeSpecName "dns-swift-storage-0". 
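
The dnsmasq-dns-568d7fd7cf-mmfpv teardown above follows the standard kill sequence: the container is sent SIGTERM with a 10s grace period (the `gracePeriod=10` record), its readiness probe starts failing with connection refused, and it exits 0 well within the budget, after which its configmap volumes are unmounted. A toy model of the grace-period logic (illustrative sketch, not kubelet source):

    package main

    import (
        "fmt"
        "time"
    )

    // kill models SIGTERM-then-SIGKILL: escalate only if the grace period
    // elapses before the container exits on its own.
    func kill(containerID string, grace time.Duration, exited <-chan struct{}) {
        fmt.Printf("SIGTERM -> %s (gracePeriod=%s)\n", containerID, grace)
        select {
        case <-exited:
            fmt.Println("exited cleanly within grace period (exitCode=0 in the log)")
        case <-time.After(grace):
            fmt.Printf("SIGKILL -> %s\n", containerID)
        }
    }

    func main() {
        exited := make(chan struct{})
        // dnsmasq shut down well inside its 10s budget in the log.
        go func() { time.Sleep(200 * time.Millisecond); close(exited) }()
        kill("08d586ead6747c4d5608371fd684665888e0b4eb12b61b2ed59f0a170ff1f5a7",
            10*time.Second, exited)
    }
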
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:59:44 crc kubenswrapper[4967]: I1121 15:59:44.963224 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7c75d46e-d31e-4505-ba3b-88d50d9bf5dc" (UID: "7c75d46e-d31e-4505-ba3b-88d50d9bf5dc"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:59:44 crc kubenswrapper[4967]: I1121 15:59:44.963329 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-config" (OuterVolumeSpecName: "config") pod "7c75d46e-d31e-4505-ba3b-88d50d9bf5dc" (UID: "7c75d46e-d31e-4505-ba3b-88d50d9bf5dc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:59:44 crc kubenswrapper[4967]: I1121 15:59:44.965487 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7c75d46e-d31e-4505-ba3b-88d50d9bf5dc" (UID: "7c75d46e-d31e-4505-ba3b-88d50d9bf5dc"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:59:44 crc kubenswrapper[4967]: I1121 15:59:44.971274 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7c75d46e-d31e-4505-ba3b-88d50d9bf5dc" (UID: "7c75d46e-d31e-4505-ba3b-88d50d9bf5dc"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:59:45 crc kubenswrapper[4967]: I1121 15:59:45.043798 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:45 crc kubenswrapper[4967]: I1121 15:59:45.043848 4967 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:45 crc kubenswrapper[4967]: I1121 15:59:45.043861 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:45 crc kubenswrapper[4967]: I1121 15:59:45.043876 4967 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:45 crc kubenswrapper[4967]: I1121 15:59:45.043890 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:45 crc kubenswrapper[4967]: I1121 15:59:45.701999 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de37571f-76bd-4e9b-9141-cf2c056bab84","Type":"ContainerStarted","Data":"b0fd7712595dfef1c9e78fd9fbdc879956b6b2a0c8170e1053c255c28e81b21c"} Nov 21 15:59:45 crc kubenswrapper[4967]: I1121 15:59:45.702034 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-568d7fd7cf-mmfpv" Nov 21 15:59:45 crc kubenswrapper[4967]: I1121 15:59:45.735002 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-mmfpv"] Nov 21 15:59:45 crc kubenswrapper[4967]: I1121 15:59:45.745449 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-mmfpv"] Nov 21 15:59:46 crc kubenswrapper[4967]: I1121 15:59:46.449627 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fzqdr" podUID="d18f04e1-396e-462a-aa4c-c9caeb4523ed" containerName="registry-server" probeResult="failure" output=< Nov 21 15:59:46 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 15:59:46 crc kubenswrapper[4967]: > Nov 21 15:59:46 crc kubenswrapper[4967]: I1121 15:59:46.548780 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c75d46e-d31e-4505-ba3b-88d50d9bf5dc" path="/var/lib/kubelet/pods/7c75d46e-d31e-4505-ba3b-88d50d9bf5dc/volumes" Nov 21 15:59:47 crc kubenswrapper[4967]: I1121 15:59:47.312610 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:47 crc kubenswrapper[4967]: I1121 15:59:47.337084 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:47 crc kubenswrapper[4967]: I1121 15:59:47.727469 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de37571f-76bd-4e9b-9141-cf2c056bab84","Type":"ContainerStarted","Data":"4a60542b861b693f099a7157093eb814f0b805256dd297d8df1cca920b2fcda9"} Nov 21 15:59:47 crc kubenswrapper[4967]: I1121 15:59:47.762937 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 21 15:59:47 crc kubenswrapper[4967]: I1121 15:59:47.933432 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-vfwfv"] Nov 21 15:59:47 crc kubenswrapper[4967]: E1121 15:59:47.933994 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c75d46e-d31e-4505-ba3b-88d50d9bf5dc" containerName="init" Nov 21 15:59:47 crc kubenswrapper[4967]: I1121 15:59:47.934009 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c75d46e-d31e-4505-ba3b-88d50d9bf5dc" containerName="init" Nov 21 15:59:47 crc kubenswrapper[4967]: E1121 15:59:47.934019 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c75d46e-d31e-4505-ba3b-88d50d9bf5dc" containerName="dnsmasq-dns" Nov 21 15:59:47 crc kubenswrapper[4967]: I1121 15:59:47.934025 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c75d46e-d31e-4505-ba3b-88d50d9bf5dc" containerName="dnsmasq-dns" Nov 21 15:59:47 crc kubenswrapper[4967]: I1121 15:59:47.934269 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c75d46e-d31e-4505-ba3b-88d50d9bf5dc" containerName="dnsmasq-dns" Nov 21 15:59:47 crc kubenswrapper[4967]: I1121 15:59:47.935120 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-vfwfv" Nov 21 15:59:47 crc kubenswrapper[4967]: I1121 15:59:47.938739 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 21 15:59:47 crc kubenswrapper[4967]: I1121 15:59:47.938904 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 21 15:59:47 crc kubenswrapper[4967]: I1121 15:59:47.957152 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-vfwfv"] Nov 21 15:59:48 crc kubenswrapper[4967]: I1121 15:59:48.022196 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55d04a95-27d7-409f-a688-f92ddb3e579c-config-data\") pod \"nova-cell1-cell-mapping-vfwfv\" (UID: \"55d04a95-27d7-409f-a688-f92ddb3e579c\") " pod="openstack/nova-cell1-cell-mapping-vfwfv" Nov 21 15:59:48 crc kubenswrapper[4967]: I1121 15:59:48.022256 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55d04a95-27d7-409f-a688-f92ddb3e579c-scripts\") pod \"nova-cell1-cell-mapping-vfwfv\" (UID: \"55d04a95-27d7-409f-a688-f92ddb3e579c\") " pod="openstack/nova-cell1-cell-mapping-vfwfv" Nov 21 15:59:48 crc kubenswrapper[4967]: I1121 15:59:48.022564 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zsfqx\" (UniqueName: \"kubernetes.io/projected/55d04a95-27d7-409f-a688-f92ddb3e579c-kube-api-access-zsfqx\") pod \"nova-cell1-cell-mapping-vfwfv\" (UID: \"55d04a95-27d7-409f-a688-f92ddb3e579c\") " pod="openstack/nova-cell1-cell-mapping-vfwfv" Nov 21 15:59:48 crc kubenswrapper[4967]: I1121 15:59:48.022626 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55d04a95-27d7-409f-a688-f92ddb3e579c-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-vfwfv\" (UID: \"55d04a95-27d7-409f-a688-f92ddb3e579c\") " pod="openstack/nova-cell1-cell-mapping-vfwfv" Nov 21 15:59:48 crc kubenswrapper[4967]: I1121 15:59:48.124444 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55d04a95-27d7-409f-a688-f92ddb3e579c-config-data\") pod \"nova-cell1-cell-mapping-vfwfv\" (UID: \"55d04a95-27d7-409f-a688-f92ddb3e579c\") " pod="openstack/nova-cell1-cell-mapping-vfwfv" Nov 21 15:59:48 crc kubenswrapper[4967]: I1121 15:59:48.124514 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55d04a95-27d7-409f-a688-f92ddb3e579c-scripts\") pod \"nova-cell1-cell-mapping-vfwfv\" (UID: \"55d04a95-27d7-409f-a688-f92ddb3e579c\") " pod="openstack/nova-cell1-cell-mapping-vfwfv" Nov 21 15:59:48 crc kubenswrapper[4967]: I1121 15:59:48.124633 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zsfqx\" (UniqueName: \"kubernetes.io/projected/55d04a95-27d7-409f-a688-f92ddb3e579c-kube-api-access-zsfqx\") pod \"nova-cell1-cell-mapping-vfwfv\" (UID: \"55d04a95-27d7-409f-a688-f92ddb3e579c\") " pod="openstack/nova-cell1-cell-mapping-vfwfv" Nov 21 15:59:48 crc kubenswrapper[4967]: I1121 15:59:48.124664 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/55d04a95-27d7-409f-a688-f92ddb3e579c-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-vfwfv\" (UID: \"55d04a95-27d7-409f-a688-f92ddb3e579c\") " pod="openstack/nova-cell1-cell-mapping-vfwfv" Nov 21 15:59:48 crc kubenswrapper[4967]: I1121 15:59:48.130247 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55d04a95-27d7-409f-a688-f92ddb3e579c-scripts\") pod \"nova-cell1-cell-mapping-vfwfv\" (UID: \"55d04a95-27d7-409f-a688-f92ddb3e579c\") " pod="openstack/nova-cell1-cell-mapping-vfwfv" Nov 21 15:59:48 crc kubenswrapper[4967]: I1121 15:59:48.130510 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55d04a95-27d7-409f-a688-f92ddb3e579c-config-data\") pod \"nova-cell1-cell-mapping-vfwfv\" (UID: \"55d04a95-27d7-409f-a688-f92ddb3e579c\") " pod="openstack/nova-cell1-cell-mapping-vfwfv" Nov 21 15:59:48 crc kubenswrapper[4967]: I1121 15:59:48.137423 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55d04a95-27d7-409f-a688-f92ddb3e579c-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-vfwfv\" (UID: \"55d04a95-27d7-409f-a688-f92ddb3e579c\") " pod="openstack/nova-cell1-cell-mapping-vfwfv" Nov 21 15:59:48 crc kubenswrapper[4967]: I1121 15:59:48.139548 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zsfqx\" (UniqueName: \"kubernetes.io/projected/55d04a95-27d7-409f-a688-f92ddb3e579c-kube-api-access-zsfqx\") pod \"nova-cell1-cell-mapping-vfwfv\" (UID: \"55d04a95-27d7-409f-a688-f92ddb3e579c\") " pod="openstack/nova-cell1-cell-mapping-vfwfv" Nov 21 15:59:48 crc kubenswrapper[4967]: I1121 15:59:48.245238 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 21 15:59:48 crc kubenswrapper[4967]: I1121 15:59:48.245281 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 21 15:59:48 crc kubenswrapper[4967]: I1121 15:59:48.255701 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-vfwfv" Nov 21 15:59:48 crc kubenswrapper[4967]: I1121 15:59:48.843940 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-vfwfv"] Nov 21 15:59:48 crc kubenswrapper[4967]: W1121 15:59:48.847273 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55d04a95_27d7_409f_a688_f92ddb3e579c.slice/crio-22402656e14187b80491493945195fed534f6b9ef83d0db1719226dd4e132078 WatchSource:0}: Error finding container 22402656e14187b80491493945195fed534f6b9ef83d0db1719226dd4e132078: Status 404 returned error can't find the container with id 22402656e14187b80491493945195fed534f6b9ef83d0db1719226dd4e132078 Nov 21 15:59:49 crc kubenswrapper[4967]: I1121 15:59:49.266149 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="ba8ea3e3-1dbe-4067-84bc-d8150d317245" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.255:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 15:59:49 crc kubenswrapper[4967]: I1121 15:59:49.266405 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="ba8ea3e3-1dbe-4067-84bc-d8150d317245" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.255:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:59:49 crc kubenswrapper[4967]: I1121 15:59:49.750121 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-vfwfv" event={"ID":"55d04a95-27d7-409f-a688-f92ddb3e579c","Type":"ContainerStarted","Data":"a053ea3bc8d99e9e237f2de86c3ba08e975e5680c5a89a2f46f05e4701c93d62"} Nov 21 15:59:49 crc kubenswrapper[4967]: I1121 15:59:49.750710 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-vfwfv" event={"ID":"55d04a95-27d7-409f-a688-f92ddb3e579c","Type":"ContainerStarted","Data":"22402656e14187b80491493945195fed534f6b9ef83d0db1719226dd4e132078"} Nov 21 15:59:49 crc kubenswrapper[4967]: I1121 15:59:49.752839 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de37571f-76bd-4e9b-9141-cf2c056bab84","Type":"ContainerStarted","Data":"04d9448d726db4685d1d24e640d1e76a6c53b587de322fb62bd815532f09fbd1"} Nov 21 15:59:49 crc kubenswrapper[4967]: I1121 15:59:49.753147 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 15:59:49 crc kubenswrapper[4967]: I1121 15:59:49.769084 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-vfwfv" podStartSLOduration=2.769063398 podStartE2EDuration="2.769063398s" podCreationTimestamp="2025-11-21 15:59:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:59:49.766021439 +0000 UTC m=+1478.024542447" watchObservedRunningTime="2025-11-21 15:59:49.769063398 +0000 UTC m=+1478.027584406" Nov 21 15:59:49 crc kubenswrapper[4967]: I1121 15:59:49.855078 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.65672966 podStartE2EDuration="10.855056496s" podCreationTimestamp="2025-11-21 15:59:39 +0000 UTC" firstStartedPulling="2025-11-21 15:59:40.939035493 +0000 UTC m=+1469.197556501" 
lastFinishedPulling="2025-11-21 15:59:49.137362329 +0000 UTC m=+1477.395883337" observedRunningTime="2025-11-21 15:59:49.811105254 +0000 UTC m=+1478.069626282" watchObservedRunningTime="2025-11-21 15:59:49.855056496 +0000 UTC m=+1478.113577504" Nov 21 15:59:50 crc kubenswrapper[4967]: I1121 15:59:50.256536 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 21 15:59:50 crc kubenswrapper[4967]: I1121 15:59:50.256595 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 21 15:59:51 crc kubenswrapper[4967]: I1121 15:59:51.513677 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="fc6a5faf-5d84-46d7-929c-3acbef695c05" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.0:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 15:59:51 crc kubenswrapper[4967]: I1121 15:59:51.514923 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="fc6a5faf-5d84-46d7-929c-3acbef695c05" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.0:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 15:59:54 crc kubenswrapper[4967]: I1121 15:59:54.823560 4967 generic.go:334] "Generic (PLEG): container finished" podID="55d04a95-27d7-409f-a688-f92ddb3e579c" containerID="a053ea3bc8d99e9e237f2de86c3ba08e975e5680c5a89a2f46f05e4701c93d62" exitCode=0 Nov 21 15:59:54 crc kubenswrapper[4967]: I1121 15:59:54.823653 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-vfwfv" event={"ID":"55d04a95-27d7-409f-a688-f92ddb3e579c","Type":"ContainerDied","Data":"a053ea3bc8d99e9e237f2de86c3ba08e975e5680c5a89a2f46f05e4701c93d62"} Nov 21 15:59:56 crc kubenswrapper[4967]: I1121 15:59:56.418867 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fzqdr" podUID="d18f04e1-396e-462a-aa4c-c9caeb4523ed" containerName="registry-server" probeResult="failure" output=< Nov 21 15:59:56 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 15:59:56 crc kubenswrapper[4967]: > Nov 21 15:59:56 crc kubenswrapper[4967]: I1121 15:59:56.497048 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-vfwfv" Nov 21 15:59:56 crc kubenswrapper[4967]: I1121 15:59:56.537338 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zsfqx\" (UniqueName: \"kubernetes.io/projected/55d04a95-27d7-409f-a688-f92ddb3e579c-kube-api-access-zsfqx\") pod \"55d04a95-27d7-409f-a688-f92ddb3e579c\" (UID: \"55d04a95-27d7-409f-a688-f92ddb3e579c\") " Nov 21 15:59:56 crc kubenswrapper[4967]: I1121 15:59:56.537744 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55d04a95-27d7-409f-a688-f92ddb3e579c-scripts\") pod \"55d04a95-27d7-409f-a688-f92ddb3e579c\" (UID: \"55d04a95-27d7-409f-a688-f92ddb3e579c\") " Nov 21 15:59:56 crc kubenswrapper[4967]: I1121 15:59:56.537765 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55d04a95-27d7-409f-a688-f92ddb3e579c-config-data\") pod \"55d04a95-27d7-409f-a688-f92ddb3e579c\" (UID: \"55d04a95-27d7-409f-a688-f92ddb3e579c\") " Nov 21 15:59:56 crc kubenswrapper[4967]: I1121 15:59:56.538011 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55d04a95-27d7-409f-a688-f92ddb3e579c-combined-ca-bundle\") pod \"55d04a95-27d7-409f-a688-f92ddb3e579c\" (UID: \"55d04a95-27d7-409f-a688-f92ddb3e579c\") " Nov 21 15:59:56 crc kubenswrapper[4967]: I1121 15:59:56.565001 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55d04a95-27d7-409f-a688-f92ddb3e579c-scripts" (OuterVolumeSpecName: "scripts") pod "55d04a95-27d7-409f-a688-f92ddb3e579c" (UID: "55d04a95-27d7-409f-a688-f92ddb3e579c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:56 crc kubenswrapper[4967]: I1121 15:59:56.565072 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55d04a95-27d7-409f-a688-f92ddb3e579c-kube-api-access-zsfqx" (OuterVolumeSpecName: "kube-api-access-zsfqx") pod "55d04a95-27d7-409f-a688-f92ddb3e579c" (UID: "55d04a95-27d7-409f-a688-f92ddb3e579c"). InnerVolumeSpecName "kube-api-access-zsfqx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:59:56 crc kubenswrapper[4967]: I1121 15:59:56.581912 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55d04a95-27d7-409f-a688-f92ddb3e579c-config-data" (OuterVolumeSpecName: "config-data") pod "55d04a95-27d7-409f-a688-f92ddb3e579c" (UID: "55d04a95-27d7-409f-a688-f92ddb3e579c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:56 crc kubenswrapper[4967]: I1121 15:59:56.582525 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55d04a95-27d7-409f-a688-f92ddb3e579c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "55d04a95-27d7-409f-a688-f92ddb3e579c" (UID: "55d04a95-27d7-409f-a688-f92ddb3e579c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:59:57 crc kubenswrapper[4967]: I1121 15:59:56.641508 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/55d04a95-27d7-409f-a688-f92ddb3e579c-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:57 crc kubenswrapper[4967]: I1121 15:59:56.641544 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55d04a95-27d7-409f-a688-f92ddb3e579c-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:57 crc kubenswrapper[4967]: I1121 15:59:56.641554 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55d04a95-27d7-409f-a688-f92ddb3e579c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:57 crc kubenswrapper[4967]: I1121 15:59:56.641569 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zsfqx\" (UniqueName: \"kubernetes.io/projected/55d04a95-27d7-409f-a688-f92ddb3e579c-kube-api-access-zsfqx\") on node \"crc\" DevicePath \"\"" Nov 21 15:59:57 crc kubenswrapper[4967]: I1121 15:59:56.878667 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-vfwfv" event={"ID":"55d04a95-27d7-409f-a688-f92ddb3e579c","Type":"ContainerDied","Data":"22402656e14187b80491493945195fed534f6b9ef83d0db1719226dd4e132078"} Nov 21 15:59:57 crc kubenswrapper[4967]: I1121 15:59:56.878699 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="22402656e14187b80491493945195fed534f6b9ef83d0db1719226dd4e132078" Nov 21 15:59:57 crc kubenswrapper[4967]: I1121 15:59:56.878836 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-vfwfv" Nov 21 15:59:57 crc kubenswrapper[4967]: I1121 15:59:57.050528 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 15:59:57 crc kubenswrapper[4967]: I1121 15:59:57.050824 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="55709a17-4e2d-462e-ade7-ffde9eff2488" containerName="nova-scheduler-scheduler" containerID="cri-o://21bbeb1ea79bc22850fc3150ea885b9de88d299389094dedd13acede98665482" gracePeriod=30 Nov 21 15:59:57 crc kubenswrapper[4967]: I1121 15:59:57.066254 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 21 15:59:57 crc kubenswrapper[4967]: I1121 15:59:57.066642 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="fc6a5faf-5d84-46d7-929c-3acbef695c05" containerName="nova-api-log" containerID="cri-o://4376347914d2f13416d98010597aa419307e55bb56b84b27d25fb1c44d8d0949" gracePeriod=30 Nov 21 15:59:57 crc kubenswrapper[4967]: I1121 15:59:57.066852 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="fc6a5faf-5d84-46d7-929c-3acbef695c05" containerName="nova-api-api" containerID="cri-o://c6ad87cde40081fed81919b409e55977f01e1ca0bd2ceead042c856d0108613b" gracePeriod=30 Nov 21 15:59:57 crc kubenswrapper[4967]: I1121 15:59:57.117917 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 15:59:57 crc kubenswrapper[4967]: I1121 15:59:57.118583 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="ba8ea3e3-1dbe-4067-84bc-d8150d317245" 
containerName="nova-metadata-log" containerID="cri-o://b6ce8922feb2be49e6d43ef28dc9f4d335e51b3e03aa42c925c35ba785026195" gracePeriod=30 Nov 21 15:59:57 crc kubenswrapper[4967]: I1121 15:59:57.118759 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="ba8ea3e3-1dbe-4067-84bc-d8150d317245" containerName="nova-metadata-metadata" containerID="cri-o://e0fcd1c44a2b65ec35a57a1a20096e11ad803f5f6fc5055cd69d42be36af1e9a" gracePeriod=30 Nov 21 15:59:57 crc kubenswrapper[4967]: E1121 15:59:57.457773 4967 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="21bbeb1ea79bc22850fc3150ea885b9de88d299389094dedd13acede98665482" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 21 15:59:57 crc kubenswrapper[4967]: E1121 15:59:57.459619 4967 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="21bbeb1ea79bc22850fc3150ea885b9de88d299389094dedd13acede98665482" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 21 15:59:57 crc kubenswrapper[4967]: E1121 15:59:57.461016 4967 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="21bbeb1ea79bc22850fc3150ea885b9de88d299389094dedd13acede98665482" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 21 15:59:57 crc kubenswrapper[4967]: E1121 15:59:57.461074 4967 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="55709a17-4e2d-462e-ade7-ffde9eff2488" containerName="nova-scheduler-scheduler" Nov 21 15:59:57 crc kubenswrapper[4967]: I1121 15:59:57.904940 4967 generic.go:334] "Generic (PLEG): container finished" podID="ba8ea3e3-1dbe-4067-84bc-d8150d317245" containerID="b6ce8922feb2be49e6d43ef28dc9f4d335e51b3e03aa42c925c35ba785026195" exitCode=143 Nov 21 15:59:57 crc kubenswrapper[4967]: I1121 15:59:57.905335 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ba8ea3e3-1dbe-4067-84bc-d8150d317245","Type":"ContainerDied","Data":"b6ce8922feb2be49e6d43ef28dc9f4d335e51b3e03aa42c925c35ba785026195"} Nov 21 15:59:57 crc kubenswrapper[4967]: I1121 15:59:57.908557 4967 generic.go:334] "Generic (PLEG): container finished" podID="fc6a5faf-5d84-46d7-929c-3acbef695c05" containerID="4376347914d2f13416d98010597aa419307e55bb56b84b27d25fb1c44d8d0949" exitCode=143 Nov 21 15:59:57 crc kubenswrapper[4967]: I1121 15:59:57.908595 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fc6a5faf-5d84-46d7-929c-3acbef695c05","Type":"ContainerDied","Data":"4376347914d2f13416d98010597aa419307e55bb56b84b27d25fb1c44d8d0949"} Nov 21 15:59:58 crc kubenswrapper[4967]: I1121 15:59:58.565256 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jsxjk"] Nov 21 15:59:58 crc kubenswrapper[4967]: E1121 15:59:58.565963 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55d04a95-27d7-409f-a688-f92ddb3e579c" containerName="nova-manage" Nov 21 15:59:58 crc 
Nov 21 15:59:58 crc kubenswrapper[4967]: I1121 15:59:58.566294 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="55d04a95-27d7-409f-a688-f92ddb3e579c" containerName="nova-manage"
Nov 21 15:59:58 crc kubenswrapper[4967]: I1121 15:59:58.568295 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jsxjk"
Nov 21 15:59:58 crc kubenswrapper[4967]: I1121 15:59:58.585525 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be331087-a877-43ca-b610-5dadd6c76032-utilities\") pod \"community-operators-jsxjk\" (UID: \"be331087-a877-43ca-b610-5dadd6c76032\") " pod="openshift-marketplace/community-operators-jsxjk"
Nov 21 15:59:58 crc kubenswrapper[4967]: I1121 15:59:58.585766 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be331087-a877-43ca-b610-5dadd6c76032-catalog-content\") pod \"community-operators-jsxjk\" (UID: \"be331087-a877-43ca-b610-5dadd6c76032\") " pod="openshift-marketplace/community-operators-jsxjk"
Nov 21 15:59:58 crc kubenswrapper[4967]: I1121 15:59:58.585801 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5lkf\" (UniqueName: \"kubernetes.io/projected/be331087-a877-43ca-b610-5dadd6c76032-kube-api-access-k5lkf\") pod \"community-operators-jsxjk\" (UID: \"be331087-a877-43ca-b610-5dadd6c76032\") " pod="openshift-marketplace/community-operators-jsxjk"
Nov 21 15:59:58 crc kubenswrapper[4967]: I1121 15:59:58.595394 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jsxjk"]
Nov 21 15:59:58 crc kubenswrapper[4967]: I1121 15:59:58.688733 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be331087-a877-43ca-b610-5dadd6c76032-utilities\") pod \"community-operators-jsxjk\" (UID: \"be331087-a877-43ca-b610-5dadd6c76032\") " pod="openshift-marketplace/community-operators-jsxjk"
Nov 21 15:59:58 crc kubenswrapper[4967]: I1121 15:59:58.688957 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be331087-a877-43ca-b610-5dadd6c76032-catalog-content\") pod \"community-operators-jsxjk\" (UID: \"be331087-a877-43ca-b610-5dadd6c76032\") " pod="openshift-marketplace/community-operators-jsxjk"
Nov 21 15:59:58 crc kubenswrapper[4967]: I1121 15:59:58.688994 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5lkf\" (UniqueName: \"kubernetes.io/projected/be331087-a877-43ca-b610-5dadd6c76032-kube-api-access-k5lkf\") pod \"community-operators-jsxjk\" (UID: \"be331087-a877-43ca-b610-5dadd6c76032\") " pod="openshift-marketplace/community-operators-jsxjk"
Nov 21 15:59:58 crc kubenswrapper[4967]: I1121 15:59:58.689354 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be331087-a877-43ca-b610-5dadd6c76032-utilities\") pod \"community-operators-jsxjk\" (UID: \"be331087-a877-43ca-b610-5dadd6c76032\") " pod="openshift-marketplace/community-operators-jsxjk"
Nov 21 15:59:58 crc kubenswrapper[4967]: I1121 15:59:58.689412 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be331087-a877-43ca-b610-5dadd6c76032-catalog-content\") pod \"community-operators-jsxjk\" (UID: \"be331087-a877-43ca-b610-5dadd6c76032\") " pod="openshift-marketplace/community-operators-jsxjk"
Nov 21 15:59:58 crc kubenswrapper[4967]: I1121 15:59:58.709091 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5lkf\" (UniqueName: \"kubernetes.io/projected/be331087-a877-43ca-b610-5dadd6c76032-kube-api-access-k5lkf\") pod \"community-operators-jsxjk\" (UID: \"be331087-a877-43ca-b610-5dadd6c76032\") " pod="openshift-marketplace/community-operators-jsxjk"
Nov 21 15:59:58 crc kubenswrapper[4967]: I1121 15:59:58.897638 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jsxjk"
Nov 21 15:59:59 crc kubenswrapper[4967]: I1121 15:59:59.438726 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jsxjk"]
Nov 21 15:59:59 crc kubenswrapper[4967]: W1121 15:59:59.442511 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbe331087_a877_43ca_b610_5dadd6c76032.slice/crio-cc77246ad60609c15c3b1ce4d1e67ae989150270a8e2d9a9ca99578caa4cea87 WatchSource:0}: Error finding container cc77246ad60609c15c3b1ce4d1e67ae989150270a8e2d9a9ca99578caa4cea87: Status 404 returned error can't find the container with id cc77246ad60609c15c3b1ce4d1e67ae989150270a8e2d9a9ca99578caa4cea87
Nov 21 15:59:59 crc kubenswrapper[4967]: I1121 15:59:59.937677 4967 generic.go:334] "Generic (PLEG): container finished" podID="be331087-a877-43ca-b610-5dadd6c76032" containerID="83cef12a2ba73d18acdabb4feb571eb18913edcae9d2afffbcfe59fed1827531" exitCode=0
Nov 21 15:59:59 crc kubenswrapper[4967]: I1121 15:59:59.937752 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jsxjk" event={"ID":"be331087-a877-43ca-b610-5dadd6c76032","Type":"ContainerDied","Data":"83cef12a2ba73d18acdabb4feb571eb18913edcae9d2afffbcfe59fed1827531"}
Nov 21 15:59:59 crc kubenswrapper[4967]: I1121 15:59:59.938913 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jsxjk" event={"ID":"be331087-a877-43ca-b610-5dadd6c76032","Type":"ContainerStarted","Data":"cc77246ad60609c15c3b1ce4d1e67ae989150270a8e2d9a9ca99578caa4cea87"}
Nov 21 16:00:00 crc kubenswrapper[4967]: I1121 16:00:00.148412 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l"]
Nov 21 16:00:00 crc kubenswrapper[4967]: I1121 16:00:00.150520 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l"
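The collect-profiles job suffix is not random: the CronJob controller names child jobs <cronjob>-<scheduled time in minutes since the Unix epoch>, and 29395680 minutes lands exactly on the 16:00:00 tick at which the pod is added. A quick Go check of that arithmetic:

package main

import (
    "fmt"
    "time"
)

func main() {
    // 29395680 minutes since the epoch, converted back to a wall-clock time.
    scheduled := time.Unix(29395680*60, 0).UTC()
    fmt.Println(scheduled) // 2025-11-21 16:00:00 +0000 UTC
}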
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l" Nov 21 16:00:00 crc kubenswrapper[4967]: I1121 16:00:00.152852 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 16:00:00 crc kubenswrapper[4967]: I1121 16:00:00.153152 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 16:00:00 crc kubenswrapper[4967]: I1121 16:00:00.161607 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l"] Nov 21 16:00:00 crc kubenswrapper[4967]: I1121 16:00:00.219472 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqczm\" (UniqueName: \"kubernetes.io/projected/571c24e7-a9aa-4b5f-812e-be7b2ad9154a-kube-api-access-hqczm\") pod \"collect-profiles-29395680-vhk6l\" (UID: \"571c24e7-a9aa-4b5f-812e-be7b2ad9154a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l" Nov 21 16:00:00 crc kubenswrapper[4967]: I1121 16:00:00.219678 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/571c24e7-a9aa-4b5f-812e-be7b2ad9154a-config-volume\") pod \"collect-profiles-29395680-vhk6l\" (UID: \"571c24e7-a9aa-4b5f-812e-be7b2ad9154a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l" Nov 21 16:00:00 crc kubenswrapper[4967]: I1121 16:00:00.219733 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/571c24e7-a9aa-4b5f-812e-be7b2ad9154a-secret-volume\") pod \"collect-profiles-29395680-vhk6l\" (UID: \"571c24e7-a9aa-4b5f-812e-be7b2ad9154a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l" Nov 21 16:00:00 crc kubenswrapper[4967]: I1121 16:00:00.321459 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/571c24e7-a9aa-4b5f-812e-be7b2ad9154a-config-volume\") pod \"collect-profiles-29395680-vhk6l\" (UID: \"571c24e7-a9aa-4b5f-812e-be7b2ad9154a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l" Nov 21 16:00:00 crc kubenswrapper[4967]: I1121 16:00:00.321696 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/571c24e7-a9aa-4b5f-812e-be7b2ad9154a-secret-volume\") pod \"collect-profiles-29395680-vhk6l\" (UID: \"571c24e7-a9aa-4b5f-812e-be7b2ad9154a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l" Nov 21 16:00:00 crc kubenswrapper[4967]: I1121 16:00:00.321812 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqczm\" (UniqueName: \"kubernetes.io/projected/571c24e7-a9aa-4b5f-812e-be7b2ad9154a-kube-api-access-hqczm\") pod \"collect-profiles-29395680-vhk6l\" (UID: \"571c24e7-a9aa-4b5f-812e-be7b2ad9154a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l" Nov 21 16:00:00 crc kubenswrapper[4967]: I1121 16:00:00.322845 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/571c24e7-a9aa-4b5f-812e-be7b2ad9154a-config-volume\") pod 
\"collect-profiles-29395680-vhk6l\" (UID: \"571c24e7-a9aa-4b5f-812e-be7b2ad9154a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l" Nov 21 16:00:00 crc kubenswrapper[4967]: I1121 16:00:00.328219 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/571c24e7-a9aa-4b5f-812e-be7b2ad9154a-secret-volume\") pod \"collect-profiles-29395680-vhk6l\" (UID: \"571c24e7-a9aa-4b5f-812e-be7b2ad9154a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l" Nov 21 16:00:00 crc kubenswrapper[4967]: I1121 16:00:00.339959 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqczm\" (UniqueName: \"kubernetes.io/projected/571c24e7-a9aa-4b5f-812e-be7b2ad9154a-kube-api-access-hqczm\") pod \"collect-profiles-29395680-vhk6l\" (UID: \"571c24e7-a9aa-4b5f-812e-be7b2ad9154a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l" Nov 21 16:00:00 crc kubenswrapper[4967]: I1121 16:00:00.472085 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l" Nov 21 16:00:00 crc kubenswrapper[4967]: I1121 16:00:00.959442 4967 generic.go:334] "Generic (PLEG): container finished" podID="ba8ea3e3-1dbe-4067-84bc-d8150d317245" containerID="e0fcd1c44a2b65ec35a57a1a20096e11ad803f5f6fc5055cd69d42be36af1e9a" exitCode=0 Nov 21 16:00:00 crc kubenswrapper[4967]: I1121 16:00:00.959527 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ba8ea3e3-1dbe-4067-84bc-d8150d317245","Type":"ContainerDied","Data":"e0fcd1c44a2b65ec35a57a1a20096e11ad803f5f6fc5055cd69d42be36af1e9a"} Nov 21 16:00:00 crc kubenswrapper[4967]: I1121 16:00:00.966977 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fc6a5faf-5d84-46d7-929c-3acbef695c05","Type":"ContainerDied","Data":"c6ad87cde40081fed81919b409e55977f01e1ca0bd2ceead042c856d0108613b"} Nov 21 16:00:00 crc kubenswrapper[4967]: I1121 16:00:00.966966 4967 generic.go:334] "Generic (PLEG): container finished" podID="fc6a5faf-5d84-46d7-929c-3acbef695c05" containerID="c6ad87cde40081fed81919b409e55977f01e1ca0bd2ceead042c856d0108613b" exitCode=0 Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.108743 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l"] Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.192874 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.347941 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-public-tls-certs\") pod \"fc6a5faf-5d84-46d7-929c-3acbef695c05\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.348052 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-internal-tls-certs\") pod \"fc6a5faf-5d84-46d7-929c-3acbef695c05\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.349111 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-config-data\") pod \"fc6a5faf-5d84-46d7-929c-3acbef695c05\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.349646 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fc6a5faf-5d84-46d7-929c-3acbef695c05-logs\") pod \"fc6a5faf-5d84-46d7-929c-3acbef695c05\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.349808 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mhblm\" (UniqueName: \"kubernetes.io/projected/fc6a5faf-5d84-46d7-929c-3acbef695c05-kube-api-access-mhblm\") pod \"fc6a5faf-5d84-46d7-929c-3acbef695c05\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.349892 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-combined-ca-bundle\") pod \"fc6a5faf-5d84-46d7-929c-3acbef695c05\" (UID: \"fc6a5faf-5d84-46d7-929c-3acbef695c05\") " Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.350668 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc6a5faf-5d84-46d7-929c-3acbef695c05-logs" (OuterVolumeSpecName: "logs") pod "fc6a5faf-5d84-46d7-929c-3acbef695c05" (UID: "fc6a5faf-5d84-46d7-929c-3acbef695c05"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.354587 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc6a5faf-5d84-46d7-929c-3acbef695c05-kube-api-access-mhblm" (OuterVolumeSpecName: "kube-api-access-mhblm") pod "fc6a5faf-5d84-46d7-929c-3acbef695c05" (UID: "fc6a5faf-5d84-46d7-929c-3acbef695c05"). InnerVolumeSpecName "kube-api-access-mhblm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.360942 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.388604 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fc6a5faf-5d84-46d7-929c-3acbef695c05" (UID: "fc6a5faf-5d84-46d7-929c-3acbef695c05"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.392079 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-config-data" (OuterVolumeSpecName: "config-data") pod "fc6a5faf-5d84-46d7-929c-3acbef695c05" (UID: "fc6a5faf-5d84-46d7-929c-3acbef695c05"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.436393 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "fc6a5faf-5d84-46d7-929c-3acbef695c05" (UID: "fc6a5faf-5d84-46d7-929c-3acbef695c05"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.451481 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba8ea3e3-1dbe-4067-84bc-d8150d317245-nova-metadata-tls-certs\") pod \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.451561 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba8ea3e3-1dbe-4067-84bc-d8150d317245-combined-ca-bundle\") pod \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.451602 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lls4n\" (UniqueName: \"kubernetes.io/projected/ba8ea3e3-1dbe-4067-84bc-d8150d317245-kube-api-access-lls4n\") pod \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.451645 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba8ea3e3-1dbe-4067-84bc-d8150d317245-config-data\") pod \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.451667 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba8ea3e3-1dbe-4067-84bc-d8150d317245-logs\") pod \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\" (UID: \"ba8ea3e3-1dbe-4067-84bc-d8150d317245\") " Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.452035 4967 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fc6a5faf-5d84-46d7-929c-3acbef695c05-logs\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.452052 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mhblm\" 
(UniqueName: \"kubernetes.io/projected/fc6a5faf-5d84-46d7-929c-3acbef695c05-kube-api-access-mhblm\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.452065 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.452075 4967 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.452084 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.454163 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba8ea3e3-1dbe-4067-84bc-d8150d317245-logs" (OuterVolumeSpecName: "logs") pod "ba8ea3e3-1dbe-4067-84bc-d8150d317245" (UID: "ba8ea3e3-1dbe-4067-84bc-d8150d317245"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.456717 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba8ea3e3-1dbe-4067-84bc-d8150d317245-kube-api-access-lls4n" (OuterVolumeSpecName: "kube-api-access-lls4n") pod "ba8ea3e3-1dbe-4067-84bc-d8150d317245" (UID: "ba8ea3e3-1dbe-4067-84bc-d8150d317245"). InnerVolumeSpecName "kube-api-access-lls4n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.460691 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "fc6a5faf-5d84-46d7-929c-3acbef695c05" (UID: "fc6a5faf-5d84-46d7-929c-3acbef695c05"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.497074 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba8ea3e3-1dbe-4067-84bc-d8150d317245-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ba8ea3e3-1dbe-4067-84bc-d8150d317245" (UID: "ba8ea3e3-1dbe-4067-84bc-d8150d317245"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.500361 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba8ea3e3-1dbe-4067-84bc-d8150d317245-config-data" (OuterVolumeSpecName: "config-data") pod "ba8ea3e3-1dbe-4067-84bc-d8150d317245" (UID: "ba8ea3e3-1dbe-4067-84bc-d8150d317245"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.531147 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba8ea3e3-1dbe-4067-84bc-d8150d317245-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "ba8ea3e3-1dbe-4067-84bc-d8150d317245" (UID: "ba8ea3e3-1dbe-4067-84bc-d8150d317245"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.556347 4967 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fc6a5faf-5d84-46d7-929c-3acbef695c05-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.556424 4967 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ba8ea3e3-1dbe-4067-84bc-d8150d317245-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.556447 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba8ea3e3-1dbe-4067-84bc-d8150d317245-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.556460 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lls4n\" (UniqueName: \"kubernetes.io/projected/ba8ea3e3-1dbe-4067-84bc-d8150d317245-kube-api-access-lls4n\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.556471 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba8ea3e3-1dbe-4067-84bc-d8150d317245-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.556483 4967 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ba8ea3e3-1dbe-4067-84bc-d8150d317245-logs\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.921695 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.981327 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fc6a5faf-5d84-46d7-929c-3acbef695c05","Type":"ContainerDied","Data":"e7312a1a73db8210b1563ba5d756b30010eb732df8162b4e64424d89355e4765"} Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.981610 4967 scope.go:117] "RemoveContainer" containerID="c6ad87cde40081fed81919b409e55977f01e1ca0bd2ceead042c856d0108613b" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.981842 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.993679 4967 generic.go:334] "Generic (PLEG): container finished" podID="55709a17-4e2d-462e-ade7-ffde9eff2488" containerID="21bbeb1ea79bc22850fc3150ea885b9de88d299389094dedd13acede98665482" exitCode=0 Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.993801 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.994134 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"55709a17-4e2d-462e-ade7-ffde9eff2488","Type":"ContainerDied","Data":"21bbeb1ea79bc22850fc3150ea885b9de88d299389094dedd13acede98665482"} Nov 21 16:00:01 crc kubenswrapper[4967]: I1121 16:00:01.994193 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"55709a17-4e2d-462e-ade7-ffde9eff2488","Type":"ContainerDied","Data":"56c2e89516307ad64416f72cbe8733e885de2a330fae80bda50322a86ea3aac6"} Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.000919 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l" event={"ID":"571c24e7-a9aa-4b5f-812e-be7b2ad9154a","Type":"ContainerStarted","Data":"a6f304a1d098c6a9379f376126e217bedd1f07a3ebd8f27a03223f968385a270"} Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.000961 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l" event={"ID":"571c24e7-a9aa-4b5f-812e-be7b2ad9154a","Type":"ContainerStarted","Data":"bb4047ae0bb4340a4daeb23dec6a24c0a4ffc228deb4814afdd8548bc0e6a6df"} Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.016394 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jsxjk" event={"ID":"be331087-a877-43ca-b610-5dadd6c76032","Type":"ContainerStarted","Data":"69350737a3b03cc8fd81abc791ed0e9be2e9f112161822ff735060241190edbf"} Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.027880 4967 scope.go:117] "RemoveContainer" containerID="4376347914d2f13416d98010597aa419307e55bb56b84b27d25fb1c44d8d0949" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.076151 4967 scope.go:117] "RemoveContainer" containerID="21bbeb1ea79bc22850fc3150ea885b9de88d299389094dedd13acede98665482" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.076474 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.077446 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55709a17-4e2d-462e-ade7-ffde9eff2488-config-data\") pod \"55709a17-4e2d-462e-ade7-ffde9eff2488\" (UID: \"55709a17-4e2d-462e-ade7-ffde9eff2488\") " Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.077545 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5k4jz\" (UniqueName: \"kubernetes.io/projected/55709a17-4e2d-462e-ade7-ffde9eff2488-kube-api-access-5k4jz\") pod \"55709a17-4e2d-462e-ade7-ffde9eff2488\" (UID: \"55709a17-4e2d-462e-ade7-ffde9eff2488\") " Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.078352 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ba8ea3e3-1dbe-4067-84bc-d8150d317245","Type":"ContainerDied","Data":"5ad77989c284faa7c3df466d5649d8df242daa7b0c3b273776c3e31216cf1af2"} Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.078595 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.080494 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55709a17-4e2d-462e-ade7-ffde9eff2488-combined-ca-bundle\") pod \"55709a17-4e2d-462e-ade7-ffde9eff2488\" (UID: \"55709a17-4e2d-462e-ade7-ffde9eff2488\") " Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.087322 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55709a17-4e2d-462e-ade7-ffde9eff2488-kube-api-access-5k4jz" (OuterVolumeSpecName: "kube-api-access-5k4jz") pod "55709a17-4e2d-462e-ade7-ffde9eff2488" (UID: "55709a17-4e2d-462e-ade7-ffde9eff2488"). InnerVolumeSpecName "kube-api-access-5k4jz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.098445 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5k4jz\" (UniqueName: \"kubernetes.io/projected/55709a17-4e2d-462e-ade7-ffde9eff2488-kube-api-access-5k4jz\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.124752 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.143853 4967 scope.go:117] "RemoveContainer" containerID="21bbeb1ea79bc22850fc3150ea885b9de88d299389094dedd13acede98665482" Nov 21 16:00:02 crc kubenswrapper[4967]: E1121 16:00:02.144169 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21bbeb1ea79bc22850fc3150ea885b9de88d299389094dedd13acede98665482\": container with ID starting with 21bbeb1ea79bc22850fc3150ea885b9de88d299389094dedd13acede98665482 not found: ID does not exist" containerID="21bbeb1ea79bc22850fc3150ea885b9de88d299389094dedd13acede98665482" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.144198 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21bbeb1ea79bc22850fc3150ea885b9de88d299389094dedd13acede98665482"} err="failed to get container status \"21bbeb1ea79bc22850fc3150ea885b9de88d299389094dedd13acede98665482\": rpc error: code = NotFound desc = could not find container \"21bbeb1ea79bc22850fc3150ea885b9de88d299389094dedd13acede98665482\": container with ID starting with 21bbeb1ea79bc22850fc3150ea885b9de88d299389094dedd13acede98665482 not found: ID does not exist" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.144217 4967 scope.go:117] "RemoveContainer" containerID="e0fcd1c44a2b65ec35a57a1a20096e11ad803f5f6fc5055cd69d42be36af1e9a" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.144298 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 21 16:00:02 crc kubenswrapper[4967]: E1121 16:00:02.144812 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba8ea3e3-1dbe-4067-84bc-d8150d317245" containerName="nova-metadata-log" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.144833 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba8ea3e3-1dbe-4067-84bc-d8150d317245" containerName="nova-metadata-log" Nov 21 16:00:02 crc kubenswrapper[4967]: E1121 16:00:02.144873 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55709a17-4e2d-462e-ade7-ffde9eff2488" containerName="nova-scheduler-scheduler" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.144880 4967 
state_mem.go:107] "Deleted CPUSet assignment" podUID="55709a17-4e2d-462e-ade7-ffde9eff2488" containerName="nova-scheduler-scheduler" Nov 21 16:00:02 crc kubenswrapper[4967]: E1121 16:00:02.144895 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba8ea3e3-1dbe-4067-84bc-d8150d317245" containerName="nova-metadata-metadata" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.144901 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba8ea3e3-1dbe-4067-84bc-d8150d317245" containerName="nova-metadata-metadata" Nov 21 16:00:02 crc kubenswrapper[4967]: E1121 16:00:02.144918 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc6a5faf-5d84-46d7-929c-3acbef695c05" containerName="nova-api-log" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.144927 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc6a5faf-5d84-46d7-929c-3acbef695c05" containerName="nova-api-log" Nov 21 16:00:02 crc kubenswrapper[4967]: E1121 16:00:02.144946 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc6a5faf-5d84-46d7-929c-3acbef695c05" containerName="nova-api-api" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.144952 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc6a5faf-5d84-46d7-929c-3acbef695c05" containerName="nova-api-api" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.145152 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba8ea3e3-1dbe-4067-84bc-d8150d317245" containerName="nova-metadata-metadata" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.145168 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc6a5faf-5d84-46d7-929c-3acbef695c05" containerName="nova-api-log" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.145180 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="55709a17-4e2d-462e-ade7-ffde9eff2488" containerName="nova-scheduler-scheduler" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.145201 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba8ea3e3-1dbe-4067-84bc-d8150d317245" containerName="nova-metadata-log" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.145213 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc6a5faf-5d84-46d7-929c-3acbef695c05" containerName="nova-api-api" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.146650 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.154887 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.155026 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.155253 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.161281 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55709a17-4e2d-462e-ade7-ffde9eff2488-config-data" (OuterVolumeSpecName: "config-data") pod "55709a17-4e2d-462e-ade7-ffde9eff2488" (UID: "55709a17-4e2d-462e-ade7-ffde9eff2488"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.166530 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l" podStartSLOduration=2.166496519 podStartE2EDuration="2.166496519s" podCreationTimestamp="2025-11-21 16:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 16:00:02.075293789 +0000 UTC m=+1490.333814797" watchObservedRunningTime="2025-11-21 16:00:02.166496519 +0000 UTC m=+1490.425017537" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.187395 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.200465 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a479d799-9383-4d3a-bdd5-f4987c29d00b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a479d799-9383-4d3a-bdd5-f4987c29d00b\") " pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.200611 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbqgj\" (UniqueName: \"kubernetes.io/projected/a479d799-9383-4d3a-bdd5-f4987c29d00b-kube-api-access-wbqgj\") pod \"nova-api-0\" (UID: \"a479d799-9383-4d3a-bdd5-f4987c29d00b\") " pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.200678 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a479d799-9383-4d3a-bdd5-f4987c29d00b-public-tls-certs\") pod \"nova-api-0\" (UID: \"a479d799-9383-4d3a-bdd5-f4987c29d00b\") " pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.200750 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a479d799-9383-4d3a-bdd5-f4987c29d00b-config-data\") pod \"nova-api-0\" (UID: \"a479d799-9383-4d3a-bdd5-f4987c29d00b\") " pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.200829 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a479d799-9383-4d3a-bdd5-f4987c29d00b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a479d799-9383-4d3a-bdd5-f4987c29d00b\") " pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.200875 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a479d799-9383-4d3a-bdd5-f4987c29d00b-logs\") pod \"nova-api-0\" (UID: \"a479d799-9383-4d3a-bdd5-f4987c29d00b\") " pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.200988 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55709a17-4e2d-462e-ade7-ffde9eff2488-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.209896 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55709a17-4e2d-462e-ade7-ffde9eff2488-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod 
"55709a17-4e2d-462e-ade7-ffde9eff2488" (UID: "55709a17-4e2d-462e-ade7-ffde9eff2488"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.210138 4967 scope.go:117] "RemoveContainer" containerID="b6ce8922feb2be49e6d43ef28dc9f4d335e51b3e03aa42c925c35ba785026195" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.229825 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.243502 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.265559 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.268724 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.270708 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.271214 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.278964 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.303568 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a479d799-9383-4d3a-bdd5-f4987c29d00b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"a479d799-9383-4d3a-bdd5-f4987c29d00b\") " pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.303641 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/69ae1faf-e6af-4799-bf8e-3fd8d975235c-logs\") pod \"nova-metadata-0\" (UID: \"69ae1faf-e6af-4799-bf8e-3fd8d975235c\") " pod="openstack/nova-metadata-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.303670 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69ae1faf-e6af-4799-bf8e-3fd8d975235c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"69ae1faf-e6af-4799-bf8e-3fd8d975235c\") " pod="openstack/nova-metadata-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.303701 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ccj5\" (UniqueName: \"kubernetes.io/projected/69ae1faf-e6af-4799-bf8e-3fd8d975235c-kube-api-access-4ccj5\") pod \"nova-metadata-0\" (UID: \"69ae1faf-e6af-4799-bf8e-3fd8d975235c\") " pod="openstack/nova-metadata-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.303748 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbqgj\" (UniqueName: \"kubernetes.io/projected/a479d799-9383-4d3a-bdd5-f4987c29d00b-kube-api-access-wbqgj\") pod \"nova-api-0\" (UID: \"a479d799-9383-4d3a-bdd5-f4987c29d00b\") " pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.303768 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/69ae1faf-e6af-4799-bf8e-3fd8d975235c-config-data\") pod \"nova-metadata-0\" (UID: \"69ae1faf-e6af-4799-bf8e-3fd8d975235c\") " pod="openstack/nova-metadata-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.303936 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a479d799-9383-4d3a-bdd5-f4987c29d00b-public-tls-certs\") pod \"nova-api-0\" (UID: \"a479d799-9383-4d3a-bdd5-f4987c29d00b\") " pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.303998 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a479d799-9383-4d3a-bdd5-f4987c29d00b-config-data\") pod \"nova-api-0\" (UID: \"a479d799-9383-4d3a-bdd5-f4987c29d00b\") " pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.304066 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a479d799-9383-4d3a-bdd5-f4987c29d00b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a479d799-9383-4d3a-bdd5-f4987c29d00b\") " pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.304099 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/69ae1faf-e6af-4799-bf8e-3fd8d975235c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"69ae1faf-e6af-4799-bf8e-3fd8d975235c\") " pod="openstack/nova-metadata-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.304120 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a479d799-9383-4d3a-bdd5-f4987c29d00b-logs\") pod \"nova-api-0\" (UID: \"a479d799-9383-4d3a-bdd5-f4987c29d00b\") " pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.304218 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55709a17-4e2d-462e-ade7-ffde9eff2488-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.304612 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a479d799-9383-4d3a-bdd5-f4987c29d00b-logs\") pod \"nova-api-0\" (UID: \"a479d799-9383-4d3a-bdd5-f4987c29d00b\") " pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.308980 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a479d799-9383-4d3a-bdd5-f4987c29d00b-public-tls-certs\") pod \"nova-api-0\" (UID: \"a479d799-9383-4d3a-bdd5-f4987c29d00b\") " pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.309038 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a479d799-9383-4d3a-bdd5-f4987c29d00b-config-data\") pod \"nova-api-0\" (UID: \"a479d799-9383-4d3a-bdd5-f4987c29d00b\") " pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.309796 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a479d799-9383-4d3a-bdd5-f4987c29d00b-internal-tls-certs\") pod \"nova-api-0\" (UID: 
\"a479d799-9383-4d3a-bdd5-f4987c29d00b\") " pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.311302 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a479d799-9383-4d3a-bdd5-f4987c29d00b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a479d799-9383-4d3a-bdd5-f4987c29d00b\") " pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.324043 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wbqgj\" (UniqueName: \"kubernetes.io/projected/a479d799-9383-4d3a-bdd5-f4987c29d00b-kube-api-access-wbqgj\") pod \"nova-api-0\" (UID: \"a479d799-9383-4d3a-bdd5-f4987c29d00b\") " pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.407576 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69ae1faf-e6af-4799-bf8e-3fd8d975235c-config-data\") pod \"nova-metadata-0\" (UID: \"69ae1faf-e6af-4799-bf8e-3fd8d975235c\") " pod="openstack/nova-metadata-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.407830 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/69ae1faf-e6af-4799-bf8e-3fd8d975235c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"69ae1faf-e6af-4799-bf8e-3fd8d975235c\") " pod="openstack/nova-metadata-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.408055 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/69ae1faf-e6af-4799-bf8e-3fd8d975235c-logs\") pod \"nova-metadata-0\" (UID: \"69ae1faf-e6af-4799-bf8e-3fd8d975235c\") " pod="openstack/nova-metadata-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.408098 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69ae1faf-e6af-4799-bf8e-3fd8d975235c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"69ae1faf-e6af-4799-bf8e-3fd8d975235c\") " pod="openstack/nova-metadata-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.408143 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ccj5\" (UniqueName: \"kubernetes.io/projected/69ae1faf-e6af-4799-bf8e-3fd8d975235c-kube-api-access-4ccj5\") pod \"nova-metadata-0\" (UID: \"69ae1faf-e6af-4799-bf8e-3fd8d975235c\") " pod="openstack/nova-metadata-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.408558 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/69ae1faf-e6af-4799-bf8e-3fd8d975235c-logs\") pod \"nova-metadata-0\" (UID: \"69ae1faf-e6af-4799-bf8e-3fd8d975235c\") " pod="openstack/nova-metadata-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.413029 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69ae1faf-e6af-4799-bf8e-3fd8d975235c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"69ae1faf-e6af-4799-bf8e-3fd8d975235c\") " pod="openstack/nova-metadata-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.413854 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/69ae1faf-e6af-4799-bf8e-3fd8d975235c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"69ae1faf-e6af-4799-bf8e-3fd8d975235c\") " pod="openstack/nova-metadata-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.414445 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69ae1faf-e6af-4799-bf8e-3fd8d975235c-config-data\") pod \"nova-metadata-0\" (UID: \"69ae1faf-e6af-4799-bf8e-3fd8d975235c\") " pod="openstack/nova-metadata-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.431697 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ccj5\" (UniqueName: \"kubernetes.io/projected/69ae1faf-e6af-4799-bf8e-3fd8d975235c-kube-api-access-4ccj5\") pod \"nova-metadata-0\" (UID: \"69ae1faf-e6af-4799-bf8e-3fd8d975235c\") " pod="openstack/nova-metadata-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.482293 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.576015 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba8ea3e3-1dbe-4067-84bc-d8150d317245" path="/var/lib/kubelet/pods/ba8ea3e3-1dbe-4067-84bc-d8150d317245/volumes" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.581072 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc6a5faf-5d84-46d7-929c-3acbef695c05" path="/var/lib/kubelet/pods/fc6a5faf-5d84-46d7-929c-3acbef695c05/volumes" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.604157 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.620385 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.654205 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.670534 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.679774 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.682738 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.723008 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.725968 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15496beb-fac6-4c92-b831-0e553160acd8-config-data\") pod \"nova-scheduler-0\" (UID: \"15496beb-fac6-4c92-b831-0e553160acd8\") " pod="openstack/nova-scheduler-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.726433 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15496beb-fac6-4c92-b831-0e553160acd8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"15496beb-fac6-4c92-b831-0e553160acd8\") " pod="openstack/nova-scheduler-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.727049 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6qm5\" (UniqueName: \"kubernetes.io/projected/15496beb-fac6-4c92-b831-0e553160acd8-kube-api-access-v6qm5\") pod \"nova-scheduler-0\" (UID: \"15496beb-fac6-4c92-b831-0e553160acd8\") " pod="openstack/nova-scheduler-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.829700 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15496beb-fac6-4c92-b831-0e553160acd8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"15496beb-fac6-4c92-b831-0e553160acd8\") " pod="openstack/nova-scheduler-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.829842 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6qm5\" (UniqueName: \"kubernetes.io/projected/15496beb-fac6-4c92-b831-0e553160acd8-kube-api-access-v6qm5\") pod \"nova-scheduler-0\" (UID: \"15496beb-fac6-4c92-b831-0e553160acd8\") " pod="openstack/nova-scheduler-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.829961 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15496beb-fac6-4c92-b831-0e553160acd8-config-data\") pod \"nova-scheduler-0\" (UID: \"15496beb-fac6-4c92-b831-0e553160acd8\") " pod="openstack/nova-scheduler-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.847102 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15496beb-fac6-4c92-b831-0e553160acd8-config-data\") pod \"nova-scheduler-0\" (UID: \"15496beb-fac6-4c92-b831-0e553160acd8\") " pod="openstack/nova-scheduler-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.854827 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6qm5\" (UniqueName: \"kubernetes.io/projected/15496beb-fac6-4c92-b831-0e553160acd8-kube-api-access-v6qm5\") pod \"nova-scheduler-0\" (UID: \"15496beb-fac6-4c92-b831-0e553160acd8\") " pod="openstack/nova-scheduler-0" Nov 21 16:00:02 crc kubenswrapper[4967]: I1121 16:00:02.856523 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/15496beb-fac6-4c92-b831-0e553160acd8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"15496beb-fac6-4c92-b831-0e553160acd8\") " pod="openstack/nova-scheduler-0" Nov 21 16:00:03 crc kubenswrapper[4967]: I1121 16:00:03.023965 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 16:00:03 crc kubenswrapper[4967]: I1121 16:00:03.038998 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 16:00:03 crc kubenswrapper[4967]: W1121 16:00:03.044006 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda479d799_9383_4d3a_bdd5_f4987c29d00b.slice/crio-d8468e2bf2d26e36398625837a7c91846f7e0e91a5a5ddac8b8094035df96969 WatchSource:0}: Error finding container d8468e2bf2d26e36398625837a7c91846f7e0e91a5a5ddac8b8094035df96969: Status 404 returned error can't find the container with id d8468e2bf2d26e36398625837a7c91846f7e0e91a5a5ddac8b8094035df96969 Nov 21 16:00:03 crc kubenswrapper[4967]: I1121 16:00:03.094120 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a479d799-9383-4d3a-bdd5-f4987c29d00b","Type":"ContainerStarted","Data":"d8468e2bf2d26e36398625837a7c91846f7e0e91a5a5ddac8b8094035df96969"} Nov 21 16:00:03 crc kubenswrapper[4967]: I1121 16:00:03.103625 4967 generic.go:334] "Generic (PLEG): container finished" podID="571c24e7-a9aa-4b5f-812e-be7b2ad9154a" containerID="a6f304a1d098c6a9379f376126e217bedd1f07a3ebd8f27a03223f968385a270" exitCode=0 Nov 21 16:00:03 crc kubenswrapper[4967]: I1121 16:00:03.103699 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l" event={"ID":"571c24e7-a9aa-4b5f-812e-be7b2ad9154a","Type":"ContainerDied","Data":"a6f304a1d098c6a9379f376126e217bedd1f07a3ebd8f27a03223f968385a270"} Nov 21 16:00:03 crc kubenswrapper[4967]: I1121 16:00:03.230147 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 16:00:03 crc kubenswrapper[4967]: I1121 16:00:03.520866 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 16:00:03 crc kubenswrapper[4967]: W1121 16:00:03.521930 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod15496beb_fac6_4c92_b831_0e553160acd8.slice/crio-5ee83c884048c3bb03484f6ce8b87c77e17638441a21b8789905f46bc06ee35b WatchSource:0}: Error finding container 5ee83c884048c3bb03484f6ce8b87c77e17638441a21b8789905f46bc06ee35b: Status 404 returned error can't find the container with id 5ee83c884048c3bb03484f6ce8b87c77e17638441a21b8789905f46bc06ee35b Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.135217 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"69ae1faf-e6af-4799-bf8e-3fd8d975235c","Type":"ContainerStarted","Data":"029ae125396a9a78335d1bef9b6f845fd41ba3f454be695555ed2814392100d5"} Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.136393 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"69ae1faf-e6af-4799-bf8e-3fd8d975235c","Type":"ContainerStarted","Data":"257ee497b9c83623c3c7be17da61b2f6ca836efbb6ab2663dbcf8329c7f70c93"} Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.136417 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"69ae1faf-e6af-4799-bf8e-3fd8d975235c","Type":"ContainerStarted","Data":"ad4a9149e7cc1911f540f40b26b7fa3beab5d64331ae5009e51018e421239b21"} Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.138443 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"15496beb-fac6-4c92-b831-0e553160acd8","Type":"ContainerStarted","Data":"b4ace88adab50db21bffd4592f1ff89bfcd212976884589d76981000bf16974f"} Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.138483 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"15496beb-fac6-4c92-b831-0e553160acd8","Type":"ContainerStarted","Data":"5ee83c884048c3bb03484f6ce8b87c77e17638441a21b8789905f46bc06ee35b"} Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.149782 4967 generic.go:334] "Generic (PLEG): container finished" podID="be331087-a877-43ca-b610-5dadd6c76032" containerID="69350737a3b03cc8fd81abc791ed0e9be2e9f112161822ff735060241190edbf" exitCode=0 Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.149840 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jsxjk" event={"ID":"be331087-a877-43ca-b610-5dadd6c76032","Type":"ContainerDied","Data":"69350737a3b03cc8fd81abc791ed0e9be2e9f112161822ff735060241190edbf"} Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.162909 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a479d799-9383-4d3a-bdd5-f4987c29d00b","Type":"ContainerStarted","Data":"bc102bb1803d637a1d4973e2a9635c79b45607fab7d58d7c4d37b77ae4fde406"} Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.162954 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a479d799-9383-4d3a-bdd5-f4987c29d00b","Type":"ContainerStarted","Data":"7b71ba2270f3335138a5d2348d38107f43d95e3a7aaa2115646f5e943fb134be"} Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.164835 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.164816751 podStartE2EDuration="2.164816751s" podCreationTimestamp="2025-11-21 16:00:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 16:00:04.158793974 +0000 UTC m=+1492.417314982" watchObservedRunningTime="2025-11-21 16:00:04.164816751 +0000 UTC m=+1492.423337759" Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.183659 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.183639785 podStartE2EDuration="2.183639785s" podCreationTimestamp="2025-11-21 16:00:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 16:00:04.175262998 +0000 UTC m=+1492.433784006" watchObservedRunningTime="2025-11-21 16:00:04.183639785 +0000 UTC m=+1492.442160793" Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.227818 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.227796873 podStartE2EDuration="2.227796873s" podCreationTimestamp="2025-11-21 16:00:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 16:00:04.211472383 +0000 UTC m=+1492.469993411" watchObservedRunningTime="2025-11-21 
16:00:04.227796873 +0000 UTC m=+1492.486317881" Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.552684 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55709a17-4e2d-462e-ade7-ffde9eff2488" path="/var/lib/kubelet/pods/55709a17-4e2d-462e-ade7-ffde9eff2488/volumes" Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.611418 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l" Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.792457 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/571c24e7-a9aa-4b5f-812e-be7b2ad9154a-secret-volume\") pod \"571c24e7-a9aa-4b5f-812e-be7b2ad9154a\" (UID: \"571c24e7-a9aa-4b5f-812e-be7b2ad9154a\") " Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.792604 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/571c24e7-a9aa-4b5f-812e-be7b2ad9154a-config-volume\") pod \"571c24e7-a9aa-4b5f-812e-be7b2ad9154a\" (UID: \"571c24e7-a9aa-4b5f-812e-be7b2ad9154a\") " Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.792660 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hqczm\" (UniqueName: \"kubernetes.io/projected/571c24e7-a9aa-4b5f-812e-be7b2ad9154a-kube-api-access-hqczm\") pod \"571c24e7-a9aa-4b5f-812e-be7b2ad9154a\" (UID: \"571c24e7-a9aa-4b5f-812e-be7b2ad9154a\") " Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.793302 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/571c24e7-a9aa-4b5f-812e-be7b2ad9154a-config-volume" (OuterVolumeSpecName: "config-volume") pod "571c24e7-a9aa-4b5f-812e-be7b2ad9154a" (UID: "571c24e7-a9aa-4b5f-812e-be7b2ad9154a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.793946 4967 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/571c24e7-a9aa-4b5f-812e-be7b2ad9154a-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.798791 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/571c24e7-a9aa-4b5f-812e-be7b2ad9154a-kube-api-access-hqczm" (OuterVolumeSpecName: "kube-api-access-hqczm") pod "571c24e7-a9aa-4b5f-812e-be7b2ad9154a" (UID: "571c24e7-a9aa-4b5f-812e-be7b2ad9154a"). InnerVolumeSpecName "kube-api-access-hqczm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.798895 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/571c24e7-a9aa-4b5f-812e-be7b2ad9154a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "571c24e7-a9aa-4b5f-812e-be7b2ad9154a" (UID: "571c24e7-a9aa-4b5f-812e-be7b2ad9154a"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.896661 4967 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/571c24e7-a9aa-4b5f-812e-be7b2ad9154a-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:04 crc kubenswrapper[4967]: I1121 16:00:04.896701 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hqczm\" (UniqueName: \"kubernetes.io/projected/571c24e7-a9aa-4b5f-812e-be7b2ad9154a-kube-api-access-hqczm\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:05 crc kubenswrapper[4967]: I1121 16:00:05.177128 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jsxjk" event={"ID":"be331087-a877-43ca-b610-5dadd6c76032","Type":"ContainerStarted","Data":"5c008bd7bf9d504e5e68b0d89957c9c8dd5dfbd5b13d0f5febf28436e88645f5"} Nov 21 16:00:05 crc kubenswrapper[4967]: I1121 16:00:05.179548 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l" event={"ID":"571c24e7-a9aa-4b5f-812e-be7b2ad9154a","Type":"ContainerDied","Data":"bb4047ae0bb4340a4daeb23dec6a24c0a4ffc228deb4814afdd8548bc0e6a6df"} Nov 21 16:00:05 crc kubenswrapper[4967]: I1121 16:00:05.179586 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bb4047ae0bb4340a4daeb23dec6a24c0a4ffc228deb4814afdd8548bc0e6a6df" Nov 21 16:00:05 crc kubenswrapper[4967]: I1121 16:00:05.179798 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l" Nov 21 16:00:05 crc kubenswrapper[4967]: I1121 16:00:05.206038 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jsxjk" podStartSLOduration=2.41760509 podStartE2EDuration="7.206002758s" podCreationTimestamp="2025-11-21 15:59:58 +0000 UTC" firstStartedPulling="2025-11-21 15:59:59.939478705 +0000 UTC m=+1488.197999713" lastFinishedPulling="2025-11-21 16:00:04.727876373 +0000 UTC m=+1492.986397381" observedRunningTime="2025-11-21 16:00:05.195379446 +0000 UTC m=+1493.453900454" watchObservedRunningTime="2025-11-21 16:00:05.206002758 +0000 UTC m=+1493.464523766" Nov 21 16:00:06 crc kubenswrapper[4967]: I1121 16:00:06.418874 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fzqdr" podUID="d18f04e1-396e-462a-aa4c-c9caeb4523ed" containerName="registry-server" probeResult="failure" output=< Nov 21 16:00:06 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 16:00:06 crc kubenswrapper[4967]: > Nov 21 16:00:07 crc kubenswrapper[4967]: I1121 16:00:07.605209 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 21 16:00:07 crc kubenswrapper[4967]: I1121 16:00:07.605377 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 21 16:00:08 crc kubenswrapper[4967]: I1121 16:00:08.024476 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 21 16:00:08 crc kubenswrapper[4967]: I1121 16:00:08.898444 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jsxjk" Nov 21 16:00:08 crc kubenswrapper[4967]: I1121 16:00:08.898509 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/community-operators-jsxjk" Nov 21 16:00:08 crc kubenswrapper[4967]: I1121 16:00:08.946895 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jsxjk" Nov 21 16:00:10 crc kubenswrapper[4967]: I1121 16:00:10.310040 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 21 16:00:12 crc kubenswrapper[4967]: I1121 16:00:12.482785 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 21 16:00:12 crc kubenswrapper[4967]: I1121 16:00:12.483986 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 21 16:00:12 crc kubenswrapper[4967]: I1121 16:00:12.604889 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 21 16:00:12 crc kubenswrapper[4967]: I1121 16:00:12.604939 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 21 16:00:13 crc kubenswrapper[4967]: I1121 16:00:13.024350 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 21 16:00:13 crc kubenswrapper[4967]: I1121 16:00:13.067215 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 21 16:00:13 crc kubenswrapper[4967]: I1121 16:00:13.330328 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 21 16:00:13 crc kubenswrapper[4967]: I1121 16:00:13.498457 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a479d799-9383-4d3a-bdd5-f4987c29d00b" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.5:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 16:00:13 crc kubenswrapper[4967]: I1121 16:00:13.498457 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a479d799-9383-4d3a-bdd5-f4987c29d00b" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.5:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 16:00:13 crc kubenswrapper[4967]: I1121 16:00:13.618459 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="69ae1faf-e6af-4799-bf8e-3fd8d975235c" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.6:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 16:00:13 crc kubenswrapper[4967]: I1121 16:00:13.618545 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="69ae1faf-e6af-4799-bf8e-3fd8d975235c" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.6:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 16:00:15 crc kubenswrapper[4967]: I1121 16:00:15.415509 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fzqdr" Nov 21 16:00:15 crc kubenswrapper[4967]: I1121 16:00:15.469197 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fzqdr" Nov 21 16:00:15 crc kubenswrapper[4967]: I1121 16:00:15.655368 4967 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fzqdr"] Nov 21 16:00:17 crc kubenswrapper[4967]: I1121 16:00:17.334003 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fzqdr" podUID="d18f04e1-396e-462a-aa4c-c9caeb4523ed" containerName="registry-server" containerID="cri-o://3d314cf524106215e2cac2b3828542ece2c19935b810f4fba1e5ca8b5ab25470" gracePeriod=2 Nov 21 16:00:17 crc kubenswrapper[4967]: I1121 16:00:17.830556 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fzqdr" Nov 21 16:00:17 crc kubenswrapper[4967]: I1121 16:00:17.910301 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d18f04e1-396e-462a-aa4c-c9caeb4523ed-catalog-content\") pod \"d18f04e1-396e-462a-aa4c-c9caeb4523ed\" (UID: \"d18f04e1-396e-462a-aa4c-c9caeb4523ed\") " Nov 21 16:00:17 crc kubenswrapper[4967]: I1121 16:00:17.910459 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d18f04e1-396e-462a-aa4c-c9caeb4523ed-utilities\") pod \"d18f04e1-396e-462a-aa4c-c9caeb4523ed\" (UID: \"d18f04e1-396e-462a-aa4c-c9caeb4523ed\") " Nov 21 16:00:17 crc kubenswrapper[4967]: I1121 16:00:17.910526 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8fwt\" (UniqueName: \"kubernetes.io/projected/d18f04e1-396e-462a-aa4c-c9caeb4523ed-kube-api-access-z8fwt\") pod \"d18f04e1-396e-462a-aa4c-c9caeb4523ed\" (UID: \"d18f04e1-396e-462a-aa4c-c9caeb4523ed\") " Nov 21 16:00:17 crc kubenswrapper[4967]: I1121 16:00:17.911001 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d18f04e1-396e-462a-aa4c-c9caeb4523ed-utilities" (OuterVolumeSpecName: "utilities") pod "d18f04e1-396e-462a-aa4c-c9caeb4523ed" (UID: "d18f04e1-396e-462a-aa4c-c9caeb4523ed"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:00:17 crc kubenswrapper[4967]: I1121 16:00:17.911262 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d18f04e1-396e-462a-aa4c-c9caeb4523ed-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:17 crc kubenswrapper[4967]: I1121 16:00:17.917490 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d18f04e1-396e-462a-aa4c-c9caeb4523ed-kube-api-access-z8fwt" (OuterVolumeSpecName: "kube-api-access-z8fwt") pod "d18f04e1-396e-462a-aa4c-c9caeb4523ed" (UID: "d18f04e1-396e-462a-aa4c-c9caeb4523ed"). InnerVolumeSpecName "kube-api-access-z8fwt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.003768 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d18f04e1-396e-462a-aa4c-c9caeb4523ed-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d18f04e1-396e-462a-aa4c-c9caeb4523ed" (UID: "d18f04e1-396e-462a-aa4c-c9caeb4523ed"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.013196 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d18f04e1-396e-462a-aa4c-c9caeb4523ed-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.013231 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z8fwt\" (UniqueName: \"kubernetes.io/projected/d18f04e1-396e-462a-aa4c-c9caeb4523ed-kube-api-access-z8fwt\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.350827 4967 generic.go:334] "Generic (PLEG): container finished" podID="d18f04e1-396e-462a-aa4c-c9caeb4523ed" containerID="3d314cf524106215e2cac2b3828542ece2c19935b810f4fba1e5ca8b5ab25470" exitCode=0 Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.350887 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzqdr" event={"ID":"d18f04e1-396e-462a-aa4c-c9caeb4523ed","Type":"ContainerDied","Data":"3d314cf524106215e2cac2b3828542ece2c19935b810f4fba1e5ca8b5ab25470"} Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.350933 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzqdr" event={"ID":"d18f04e1-396e-462a-aa4c-c9caeb4523ed","Type":"ContainerDied","Data":"e7ddda101601c84b79c16fdfa35f82bf682c5d23b7eeb298eb60c1cd22f5def9"} Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.350933 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fzqdr" Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.350958 4967 scope.go:117] "RemoveContainer" containerID="3d314cf524106215e2cac2b3828542ece2c19935b810f4fba1e5ca8b5ab25470" Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.380903 4967 scope.go:117] "RemoveContainer" containerID="7ed265e832a10ddd90ac9fe8f7a59cc9837d414e49dc7208df4e7da73d800532" Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.392127 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fzqdr"] Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.405999 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fzqdr"] Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.410493 4967 scope.go:117] "RemoveContainer" containerID="f4e3c724f3e727c67cb7fe5d2313fb42d9704b48001a9f574fe62b7edf94f02c" Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.458099 4967 scope.go:117] "RemoveContainer" containerID="3d314cf524106215e2cac2b3828542ece2c19935b810f4fba1e5ca8b5ab25470" Nov 21 16:00:18 crc kubenswrapper[4967]: E1121 16:00:18.458644 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d314cf524106215e2cac2b3828542ece2c19935b810f4fba1e5ca8b5ab25470\": container with ID starting with 3d314cf524106215e2cac2b3828542ece2c19935b810f4fba1e5ca8b5ab25470 not found: ID does not exist" containerID="3d314cf524106215e2cac2b3828542ece2c19935b810f4fba1e5ca8b5ab25470" Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.458694 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d314cf524106215e2cac2b3828542ece2c19935b810f4fba1e5ca8b5ab25470"} err="failed to get container status \"3d314cf524106215e2cac2b3828542ece2c19935b810f4fba1e5ca8b5ab25470\": 
rpc error: code = NotFound desc = could not find container \"3d314cf524106215e2cac2b3828542ece2c19935b810f4fba1e5ca8b5ab25470\": container with ID starting with 3d314cf524106215e2cac2b3828542ece2c19935b810f4fba1e5ca8b5ab25470 not found: ID does not exist" Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.458728 4967 scope.go:117] "RemoveContainer" containerID="7ed265e832a10ddd90ac9fe8f7a59cc9837d414e49dc7208df4e7da73d800532" Nov 21 16:00:18 crc kubenswrapper[4967]: E1121 16:00:18.459038 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ed265e832a10ddd90ac9fe8f7a59cc9837d414e49dc7208df4e7da73d800532\": container with ID starting with 7ed265e832a10ddd90ac9fe8f7a59cc9837d414e49dc7208df4e7da73d800532 not found: ID does not exist" containerID="7ed265e832a10ddd90ac9fe8f7a59cc9837d414e49dc7208df4e7da73d800532" Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.459064 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ed265e832a10ddd90ac9fe8f7a59cc9837d414e49dc7208df4e7da73d800532"} err="failed to get container status \"7ed265e832a10ddd90ac9fe8f7a59cc9837d414e49dc7208df4e7da73d800532\": rpc error: code = NotFound desc = could not find container \"7ed265e832a10ddd90ac9fe8f7a59cc9837d414e49dc7208df4e7da73d800532\": container with ID starting with 7ed265e832a10ddd90ac9fe8f7a59cc9837d414e49dc7208df4e7da73d800532 not found: ID does not exist" Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.459080 4967 scope.go:117] "RemoveContainer" containerID="f4e3c724f3e727c67cb7fe5d2313fb42d9704b48001a9f574fe62b7edf94f02c" Nov 21 16:00:18 crc kubenswrapper[4967]: E1121 16:00:18.459468 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4e3c724f3e727c67cb7fe5d2313fb42d9704b48001a9f574fe62b7edf94f02c\": container with ID starting with f4e3c724f3e727c67cb7fe5d2313fb42d9704b48001a9f574fe62b7edf94f02c not found: ID does not exist" containerID="f4e3c724f3e727c67cb7fe5d2313fb42d9704b48001a9f574fe62b7edf94f02c" Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.459501 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4e3c724f3e727c67cb7fe5d2313fb42d9704b48001a9f574fe62b7edf94f02c"} err="failed to get container status \"f4e3c724f3e727c67cb7fe5d2313fb42d9704b48001a9f574fe62b7edf94f02c\": rpc error: code = NotFound desc = could not find container \"f4e3c724f3e727c67cb7fe5d2313fb42d9704b48001a9f574fe62b7edf94f02c\": container with ID starting with f4e3c724f3e727c67cb7fe5d2313fb42d9704b48001a9f574fe62b7edf94f02c not found: ID does not exist" Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.550948 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d18f04e1-396e-462a-aa4c-c9caeb4523ed" path="/var/lib/kubelet/pods/d18f04e1-396e-462a-aa4c-c9caeb4523ed/volumes" Nov 21 16:00:18 crc kubenswrapper[4967]: I1121 16:00:18.943364 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jsxjk" Nov 21 16:00:21 crc kubenswrapper[4967]: I1121 16:00:21.055217 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jsxjk"] Nov 21 16:00:21 crc kubenswrapper[4967]: I1121 16:00:21.055776 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jsxjk" 
podUID="be331087-a877-43ca-b610-5dadd6c76032" containerName="registry-server" containerID="cri-o://5c008bd7bf9d504e5e68b0d89957c9c8dd5dfbd5b13d0f5febf28436e88645f5" gracePeriod=2 Nov 21 16:00:21 crc kubenswrapper[4967]: I1121 16:00:21.394200 4967 generic.go:334] "Generic (PLEG): container finished" podID="be331087-a877-43ca-b610-5dadd6c76032" containerID="5c008bd7bf9d504e5e68b0d89957c9c8dd5dfbd5b13d0f5febf28436e88645f5" exitCode=0 Nov 21 16:00:21 crc kubenswrapper[4967]: I1121 16:00:21.394507 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jsxjk" event={"ID":"be331087-a877-43ca-b610-5dadd6c76032","Type":"ContainerDied","Data":"5c008bd7bf9d504e5e68b0d89957c9c8dd5dfbd5b13d0f5febf28436e88645f5"} Nov 21 16:00:21 crc kubenswrapper[4967]: I1121 16:00:21.582872 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jsxjk" Nov 21 16:00:21 crc kubenswrapper[4967]: I1121 16:00:21.695725 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be331087-a877-43ca-b610-5dadd6c76032-utilities\") pod \"be331087-a877-43ca-b610-5dadd6c76032\" (UID: \"be331087-a877-43ca-b610-5dadd6c76032\") " Nov 21 16:00:21 crc kubenswrapper[4967]: I1121 16:00:21.696180 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k5lkf\" (UniqueName: \"kubernetes.io/projected/be331087-a877-43ca-b610-5dadd6c76032-kube-api-access-k5lkf\") pod \"be331087-a877-43ca-b610-5dadd6c76032\" (UID: \"be331087-a877-43ca-b610-5dadd6c76032\") " Nov 21 16:00:21 crc kubenswrapper[4967]: I1121 16:00:21.696340 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be331087-a877-43ca-b610-5dadd6c76032-catalog-content\") pod \"be331087-a877-43ca-b610-5dadd6c76032\" (UID: \"be331087-a877-43ca-b610-5dadd6c76032\") " Nov 21 16:00:21 crc kubenswrapper[4967]: I1121 16:00:21.696502 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be331087-a877-43ca-b610-5dadd6c76032-utilities" (OuterVolumeSpecName: "utilities") pod "be331087-a877-43ca-b610-5dadd6c76032" (UID: "be331087-a877-43ca-b610-5dadd6c76032"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:00:21 crc kubenswrapper[4967]: I1121 16:00:21.697080 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be331087-a877-43ca-b610-5dadd6c76032-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:21 crc kubenswrapper[4967]: I1121 16:00:21.714897 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be331087-a877-43ca-b610-5dadd6c76032-kube-api-access-k5lkf" (OuterVolumeSpecName: "kube-api-access-k5lkf") pod "be331087-a877-43ca-b610-5dadd6c76032" (UID: "be331087-a877-43ca-b610-5dadd6c76032"). InnerVolumeSpecName "kube-api-access-k5lkf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:00:21 crc kubenswrapper[4967]: I1121 16:00:21.800037 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k5lkf\" (UniqueName: \"kubernetes.io/projected/be331087-a877-43ca-b610-5dadd6c76032-kube-api-access-k5lkf\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:21 crc kubenswrapper[4967]: I1121 16:00:21.808707 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be331087-a877-43ca-b610-5dadd6c76032-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "be331087-a877-43ca-b610-5dadd6c76032" (UID: "be331087-a877-43ca-b610-5dadd6c76032"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:00:21 crc kubenswrapper[4967]: I1121 16:00:21.902111 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be331087-a877-43ca-b610-5dadd6c76032-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:22 crc kubenswrapper[4967]: I1121 16:00:22.409353 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jsxjk" event={"ID":"be331087-a877-43ca-b610-5dadd6c76032","Type":"ContainerDied","Data":"cc77246ad60609c15c3b1ce4d1e67ae989150270a8e2d9a9ca99578caa4cea87"} Nov 21 16:00:22 crc kubenswrapper[4967]: I1121 16:00:22.409417 4967 scope.go:117] "RemoveContainer" containerID="5c008bd7bf9d504e5e68b0d89957c9c8dd5dfbd5b13d0f5febf28436e88645f5" Nov 21 16:00:22 crc kubenswrapper[4967]: I1121 16:00:22.409425 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jsxjk" Nov 21 16:00:22 crc kubenswrapper[4967]: I1121 16:00:22.433102 4967 scope.go:117] "RemoveContainer" containerID="69350737a3b03cc8fd81abc791ed0e9be2e9f112161822ff735060241190edbf" Nov 21 16:00:22 crc kubenswrapper[4967]: I1121 16:00:22.451153 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jsxjk"] Nov 21 16:00:22 crc kubenswrapper[4967]: I1121 16:00:22.465563 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jsxjk"] Nov 21 16:00:22 crc kubenswrapper[4967]: I1121 16:00:22.471377 4967 scope.go:117] "RemoveContainer" containerID="83cef12a2ba73d18acdabb4feb571eb18913edcae9d2afffbcfe59fed1827531" Nov 21 16:00:22 crc kubenswrapper[4967]: I1121 16:00:22.491073 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 21 16:00:22 crc kubenswrapper[4967]: I1121 16:00:22.491772 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 21 16:00:22 crc kubenswrapper[4967]: I1121 16:00:22.492381 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 21 16:00:22 crc kubenswrapper[4967]: I1121 16:00:22.497879 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 21 16:00:22 crc kubenswrapper[4967]: I1121 16:00:22.553601 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be331087-a877-43ca-b610-5dadd6c76032" path="/var/lib/kubelet/pods/be331087-a877-43ca-b610-5dadd6c76032/volumes" Nov 21 16:00:22 crc kubenswrapper[4967]: I1121 16:00:22.609258 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 21 16:00:22 crc 
kubenswrapper[4967]: I1121 16:00:22.610710 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 21 16:00:22 crc kubenswrapper[4967]: I1121 16:00:22.614962 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 21 16:00:23 crc kubenswrapper[4967]: I1121 16:00:23.422362 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 21 16:00:23 crc kubenswrapper[4967]: I1121 16:00:23.426999 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 21 16:00:23 crc kubenswrapper[4967]: I1121 16:00:23.429420 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 21 16:00:31 crc kubenswrapper[4967]: I1121 16:00:31.494185 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 16:00:32 crc kubenswrapper[4967]: I1121 16:00:32.445751 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 16:00:36 crc kubenswrapper[4967]: I1121 16:00:36.780786 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="1a455b5d-516b-4e43-a717-f7aa6e326ee8" containerName="rabbitmq" containerID="cri-o://8488f2e8aab08ef2629317fbaa7f522860a9202e49506daeea29139b792ea618" gracePeriod=604795 Nov 21 16:00:37 crc kubenswrapper[4967]: I1121 16:00:37.851122 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="d96c12a3-6ce4-40f6-a655-0881d711f9fa" containerName="rabbitmq" containerID="cri-o://02d79e1d71d3c681e219adebcff3bc382e41de298e14be461f68c747348a0a41" gracePeriod=604795 Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.495394 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.616839 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.617502 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1a455b5d-516b-4e43-a717-f7aa6e326ee8-server-conf\") pod \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.617674 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-plugins\") pod \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.617857 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k2sqg\" (UniqueName: \"kubernetes.io/projected/1a455b5d-516b-4e43-a717-f7aa6e326ee8-kube-api-access-k2sqg\") pod \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.618000 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1a455b5d-516b-4e43-a717-f7aa6e326ee8-pod-info\") pod \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.618086 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1a455b5d-516b-4e43-a717-f7aa6e326ee8-erlang-cookie-secret\") pod \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.618187 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1a455b5d-516b-4e43-a717-f7aa6e326ee8-plugins-conf\") pod \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.618348 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-erlang-cookie\") pod \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.618470 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-confd\") pod \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.618550 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1a455b5d-516b-4e43-a717-f7aa6e326ee8-config-data\") pod \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\" (UID: 
\"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.618635 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-tls\") pod \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\" (UID: \"1a455b5d-516b-4e43-a717-f7aa6e326ee8\") " Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.622657 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "1a455b5d-516b-4e43-a717-f7aa6e326ee8" (UID: "1a455b5d-516b-4e43-a717-f7aa6e326ee8"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.623252 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a455b5d-516b-4e43-a717-f7aa6e326ee8-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "1a455b5d-516b-4e43-a717-f7aa6e326ee8" (UID: "1a455b5d-516b-4e43-a717-f7aa6e326ee8"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.623835 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/1a455b5d-516b-4e43-a717-f7aa6e326ee8-pod-info" (OuterVolumeSpecName: "pod-info") pod "1a455b5d-516b-4e43-a717-f7aa6e326ee8" (UID: "1a455b5d-516b-4e43-a717-f7aa6e326ee8"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.625058 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "persistence") pod "1a455b5d-516b-4e43-a717-f7aa6e326ee8" (UID: "1a455b5d-516b-4e43-a717-f7aa6e326ee8"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.628865 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a455b5d-516b-4e43-a717-f7aa6e326ee8-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "1a455b5d-516b-4e43-a717-f7aa6e326ee8" (UID: "1a455b5d-516b-4e43-a717-f7aa6e326ee8"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.647266 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "1a455b5d-516b-4e43-a717-f7aa6e326ee8" (UID: "1a455b5d-516b-4e43-a717-f7aa6e326ee8"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.656419 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a455b5d-516b-4e43-a717-f7aa6e326ee8-kube-api-access-k2sqg" (OuterVolumeSpecName: "kube-api-access-k2sqg") pod "1a455b5d-516b-4e43-a717-f7aa6e326ee8" (UID: "1a455b5d-516b-4e43-a717-f7aa6e326ee8"). InnerVolumeSpecName "kube-api-access-k2sqg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.658842 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "1a455b5d-516b-4e43-a717-f7aa6e326ee8" (UID: "1a455b5d-516b-4e43-a717-f7aa6e326ee8"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.679167 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a455b5d-516b-4e43-a717-f7aa6e326ee8-config-data" (OuterVolumeSpecName: "config-data") pod "1a455b5d-516b-4e43-a717-f7aa6e326ee8" (UID: "1a455b5d-516b-4e43-a717-f7aa6e326ee8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.683677 4967 generic.go:334] "Generic (PLEG): container finished" podID="1a455b5d-516b-4e43-a717-f7aa6e326ee8" containerID="8488f2e8aab08ef2629317fbaa7f522860a9202e49506daeea29139b792ea618" exitCode=0 Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.683724 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"1a455b5d-516b-4e43-a717-f7aa6e326ee8","Type":"ContainerDied","Data":"8488f2e8aab08ef2629317fbaa7f522860a9202e49506daeea29139b792ea618"} Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.683753 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"1a455b5d-516b-4e43-a717-f7aa6e326ee8","Type":"ContainerDied","Data":"f6c7bbe292a88d7bc98bdc9745c09ebc4c2ecb4fa1043681c797a36ea49eac60"} Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.683771 4967 scope.go:117] "RemoveContainer" containerID="8488f2e8aab08ef2629317fbaa7f522860a9202e49506daeea29139b792ea618" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.683987 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.725195 4967 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.725235 4967 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.726652 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k2sqg\" (UniqueName: \"kubernetes.io/projected/1a455b5d-516b-4e43-a717-f7aa6e326ee8-kube-api-access-k2sqg\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.726676 4967 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1a455b5d-516b-4e43-a717-f7aa6e326ee8-pod-info\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.726688 4967 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1a455b5d-516b-4e43-a717-f7aa6e326ee8-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.726696 4967 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1a455b5d-516b-4e43-a717-f7aa6e326ee8-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.726705 4967 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.726713 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1a455b5d-516b-4e43-a717-f7aa6e326ee8-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.726722 4967 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.773455 4967 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.780786 4967 scope.go:117] "RemoveContainer" containerID="a750152ee1f045650b939c339bc2d5965490eeb8a6e4045612999da96dba7fad" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.794934 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a455b5d-516b-4e43-a717-f7aa6e326ee8-server-conf" (OuterVolumeSpecName: "server-conf") pod "1a455b5d-516b-4e43-a717-f7aa6e326ee8" (UID: "1a455b5d-516b-4e43-a717-f7aa6e326ee8"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.828629 4967 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.828672 4967 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1a455b5d-516b-4e43-a717-f7aa6e326ee8-server-conf\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.869438 4967 scope.go:117] "RemoveContainer" containerID="8488f2e8aab08ef2629317fbaa7f522860a9202e49506daeea29139b792ea618" Nov 21 16:00:43 crc kubenswrapper[4967]: E1121 16:00:43.870357 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8488f2e8aab08ef2629317fbaa7f522860a9202e49506daeea29139b792ea618\": container with ID starting with 8488f2e8aab08ef2629317fbaa7f522860a9202e49506daeea29139b792ea618 not found: ID does not exist" containerID="8488f2e8aab08ef2629317fbaa7f522860a9202e49506daeea29139b792ea618" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.870396 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8488f2e8aab08ef2629317fbaa7f522860a9202e49506daeea29139b792ea618"} err="failed to get container status \"8488f2e8aab08ef2629317fbaa7f522860a9202e49506daeea29139b792ea618\": rpc error: code = NotFound desc = could not find container \"8488f2e8aab08ef2629317fbaa7f522860a9202e49506daeea29139b792ea618\": container with ID starting with 8488f2e8aab08ef2629317fbaa7f522860a9202e49506daeea29139b792ea618 not found: ID does not exist" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.870424 4967 scope.go:117] "RemoveContainer" containerID="a750152ee1f045650b939c339bc2d5965490eeb8a6e4045612999da96dba7fad" Nov 21 16:00:43 crc kubenswrapper[4967]: E1121 16:00:43.871993 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a750152ee1f045650b939c339bc2d5965490eeb8a6e4045612999da96dba7fad\": container with ID starting with a750152ee1f045650b939c339bc2d5965490eeb8a6e4045612999da96dba7fad not found: ID does not exist" containerID="a750152ee1f045650b939c339bc2d5965490eeb8a6e4045612999da96dba7fad" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.872058 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a750152ee1f045650b939c339bc2d5965490eeb8a6e4045612999da96dba7fad"} err="failed to get container status \"a750152ee1f045650b939c339bc2d5965490eeb8a6e4045612999da96dba7fad\": rpc error: code = NotFound desc = could not find container \"a750152ee1f045650b939c339bc2d5965490eeb8a6e4045612999da96dba7fad\": container with ID starting with a750152ee1f045650b939c339bc2d5965490eeb8a6e4045612999da96dba7fad not found: ID does not exist" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.874105 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "1a455b5d-516b-4e43-a717-f7aa6e326ee8" (UID: "1a455b5d-516b-4e43-a717-f7aa6e326ee8"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:00:43 crc kubenswrapper[4967]: I1121 16:00:43.931365 4967 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1a455b5d-516b-4e43-a717-f7aa6e326ee8-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.084869 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.099154 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.124952 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 16:00:44 crc kubenswrapper[4967]: E1121 16:00:44.125525 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="571c24e7-a9aa-4b5f-812e-be7b2ad9154a" containerName="collect-profiles" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.125543 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="571c24e7-a9aa-4b5f-812e-be7b2ad9154a" containerName="collect-profiles" Nov 21 16:00:44 crc kubenswrapper[4967]: E1121 16:00:44.125572 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d18f04e1-396e-462a-aa4c-c9caeb4523ed" containerName="extract-utilities" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.125579 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="d18f04e1-396e-462a-aa4c-c9caeb4523ed" containerName="extract-utilities" Nov 21 16:00:44 crc kubenswrapper[4967]: E1121 16:00:44.125597 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be331087-a877-43ca-b610-5dadd6c76032" containerName="extract-content" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.125604 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="be331087-a877-43ca-b610-5dadd6c76032" containerName="extract-content" Nov 21 16:00:44 crc kubenswrapper[4967]: E1121 16:00:44.125620 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a455b5d-516b-4e43-a717-f7aa6e326ee8" containerName="rabbitmq" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.125627 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a455b5d-516b-4e43-a717-f7aa6e326ee8" containerName="rabbitmq" Nov 21 16:00:44 crc kubenswrapper[4967]: E1121 16:00:44.125646 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d18f04e1-396e-462a-aa4c-c9caeb4523ed" containerName="registry-server" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.125653 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="d18f04e1-396e-462a-aa4c-c9caeb4523ed" containerName="registry-server" Nov 21 16:00:44 crc kubenswrapper[4967]: E1121 16:00:44.125678 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be331087-a877-43ca-b610-5dadd6c76032" containerName="extract-utilities" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.125686 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="be331087-a877-43ca-b610-5dadd6c76032" containerName="extract-utilities" Nov 21 16:00:44 crc kubenswrapper[4967]: E1121 16:00:44.125710 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a455b5d-516b-4e43-a717-f7aa6e326ee8" containerName="setup-container" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.125718 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a455b5d-516b-4e43-a717-f7aa6e326ee8" containerName="setup-container" 
Nov 21 16:00:44 crc kubenswrapper[4967]: E1121 16:00:44.125735 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d18f04e1-396e-462a-aa4c-c9caeb4523ed" containerName="extract-content" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.126420 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="d18f04e1-396e-462a-aa4c-c9caeb4523ed" containerName="extract-content" Nov 21 16:00:44 crc kubenswrapper[4967]: E1121 16:00:44.126450 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be331087-a877-43ca-b610-5dadd6c76032" containerName="registry-server" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.126458 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="be331087-a877-43ca-b610-5dadd6c76032" containerName="registry-server" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.126742 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a455b5d-516b-4e43-a717-f7aa6e326ee8" containerName="rabbitmq" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.126760 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="be331087-a877-43ca-b610-5dadd6c76032" containerName="registry-server" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.126782 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="571c24e7-a9aa-4b5f-812e-be7b2ad9154a" containerName="collect-profiles" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.126811 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="d18f04e1-396e-462a-aa4c-c9caeb4523ed" containerName="registry-server" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.128332 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.131412 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.132167 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.132369 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.132562 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-dmgt2" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.136509 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.136566 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.136713 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.144019 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.241148 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/235ca898-447b-4df0-9aef-3bf2bc1719ce-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.241245 4967 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/235ca898-447b-4df0-9aef-3bf2bc1719ce-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.241278 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/235ca898-447b-4df0-9aef-3bf2bc1719ce-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.241300 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4fgq\" (UniqueName: \"kubernetes.io/projected/235ca898-447b-4df0-9aef-3bf2bc1719ce-kube-api-access-j4fgq\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.241332 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.241386 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/235ca898-447b-4df0-9aef-3bf2bc1719ce-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.241408 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/235ca898-447b-4df0-9aef-3bf2bc1719ce-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.241448 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/235ca898-447b-4df0-9aef-3bf2bc1719ce-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.241480 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/235ca898-447b-4df0-9aef-3bf2bc1719ce-pod-info\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.241537 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/235ca898-447b-4df0-9aef-3bf2bc1719ce-server-conf\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.241564 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/235ca898-447b-4df0-9aef-3bf2bc1719ce-config-data\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.343867 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4fgq\" (UniqueName: \"kubernetes.io/projected/235ca898-447b-4df0-9aef-3bf2bc1719ce-kube-api-access-j4fgq\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.343927 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.343986 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/235ca898-447b-4df0-9aef-3bf2bc1719ce-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.344021 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/235ca898-447b-4df0-9aef-3bf2bc1719ce-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.344140 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.344621 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/235ca898-447b-4df0-9aef-3bf2bc1719ce-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.344672 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/235ca898-447b-4df0-9aef-3bf2bc1719ce-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.344736 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/235ca898-447b-4df0-9aef-3bf2bc1719ce-pod-info\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.344851 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/235ca898-447b-4df0-9aef-3bf2bc1719ce-server-conf\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc 
kubenswrapper[4967]: I1121 16:00:44.344892 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/235ca898-447b-4df0-9aef-3bf2bc1719ce-config-data\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.345001 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/235ca898-447b-4df0-9aef-3bf2bc1719ce-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.346205 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/235ca898-447b-4df0-9aef-3bf2bc1719ce-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.346419 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/235ca898-447b-4df0-9aef-3bf2bc1719ce-server-conf\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.346576 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/235ca898-447b-4df0-9aef-3bf2bc1719ce-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.347351 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/235ca898-447b-4df0-9aef-3bf2bc1719ce-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.348601 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/235ca898-447b-4df0-9aef-3bf2bc1719ce-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.348948 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/235ca898-447b-4df0-9aef-3bf2bc1719ce-config-data\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.350454 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/235ca898-447b-4df0-9aef-3bf2bc1719ce-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.350464 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/235ca898-447b-4df0-9aef-3bf2bc1719ce-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: 
\"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.351473 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/235ca898-447b-4df0-9aef-3bf2bc1719ce-pod-info\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.362992 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/235ca898-447b-4df0-9aef-3bf2bc1719ce-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.367048 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4fgq\" (UniqueName: \"kubernetes.io/projected/235ca898-447b-4df0-9aef-3bf2bc1719ce-kube-api-access-j4fgq\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.419695 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-server-0\" (UID: \"235ca898-447b-4df0-9aef-3bf2bc1719ce\") " pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.451593 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.550512 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a455b5d-516b-4e43-a717-f7aa6e326ee8" path="/var/lib/kubelet/pods/1a455b5d-516b-4e43-a717-f7aa6e326ee8/volumes" Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.737219 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"d96c12a3-6ce4-40f6-a655-0881d711f9fa","Type":"ContainerDied","Data":"02d79e1d71d3c681e219adebcff3bc382e41de298e14be461f68c747348a0a41"} Nov 21 16:00:44 crc kubenswrapper[4967]: I1121 16:00:44.737220 4967 generic.go:334] "Generic (PLEG): container finished" podID="d96c12a3-6ce4-40f6-a655-0881d711f9fa" containerID="02d79e1d71d3c681e219adebcff3bc382e41de298e14be461f68c747348a0a41" exitCode=0 Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.134282 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.433056 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.485154 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-plugins\") pod \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.485235 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.485341 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-tls\") pod \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.485372 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f2q9d\" (UniqueName: \"kubernetes.io/projected/d96c12a3-6ce4-40f6-a655-0881d711f9fa-kube-api-access-f2q9d\") pod \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.485417 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d96c12a3-6ce4-40f6-a655-0881d711f9fa-erlang-cookie-secret\") pod \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.485480 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d96c12a3-6ce4-40f6-a655-0881d711f9fa-server-conf\") pod \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.485503 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-confd\") pod \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.485577 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d96c12a3-6ce4-40f6-a655-0881d711f9fa-pod-info\") pod \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.485613 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d96c12a3-6ce4-40f6-a655-0881d711f9fa-plugins-conf\") pod \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.485662 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d96c12a3-6ce4-40f6-a655-0881d711f9fa-config-data\") pod \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\" (UID: 
\"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.485795 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-erlang-cookie\") pod \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\" (UID: \"d96c12a3-6ce4-40f6-a655-0881d711f9fa\") " Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.495592 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d96c12a3-6ce4-40f6-a655-0881d711f9fa-kube-api-access-f2q9d" (OuterVolumeSpecName: "kube-api-access-f2q9d") pod "d96c12a3-6ce4-40f6-a655-0881d711f9fa" (UID: "d96c12a3-6ce4-40f6-a655-0881d711f9fa"). InnerVolumeSpecName "kube-api-access-f2q9d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.495598 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "persistence") pod "d96c12a3-6ce4-40f6-a655-0881d711f9fa" (UID: "d96c12a3-6ce4-40f6-a655-0881d711f9fa"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.495635 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d96c12a3-6ce4-40f6-a655-0881d711f9fa-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "d96c12a3-6ce4-40f6-a655-0881d711f9fa" (UID: "d96c12a3-6ce4-40f6-a655-0881d711f9fa"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.496110 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "d96c12a3-6ce4-40f6-a655-0881d711f9fa" (UID: "d96c12a3-6ce4-40f6-a655-0881d711f9fa"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.496663 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "d96c12a3-6ce4-40f6-a655-0881d711f9fa" (UID: "d96c12a3-6ce4-40f6-a655-0881d711f9fa"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.496903 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/d96c12a3-6ce4-40f6-a655-0881d711f9fa-pod-info" (OuterVolumeSpecName: "pod-info") pod "d96c12a3-6ce4-40f6-a655-0881d711f9fa" (UID: "d96c12a3-6ce4-40f6-a655-0881d711f9fa"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.504962 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "d96c12a3-6ce4-40f6-a655-0881d711f9fa" (UID: "d96c12a3-6ce4-40f6-a655-0881d711f9fa"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.510233 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d96c12a3-6ce4-40f6-a655-0881d711f9fa-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "d96c12a3-6ce4-40f6-a655-0881d711f9fa" (UID: "d96c12a3-6ce4-40f6-a655-0881d711f9fa"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.560687 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d96c12a3-6ce4-40f6-a655-0881d711f9fa-config-data" (OuterVolumeSpecName: "config-data") pod "d96c12a3-6ce4-40f6-a655-0881d711f9fa" (UID: "d96c12a3-6ce4-40f6-a655-0881d711f9fa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.575595 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d96c12a3-6ce4-40f6-a655-0881d711f9fa-server-conf" (OuterVolumeSpecName: "server-conf") pod "d96c12a3-6ce4-40f6-a655-0881d711f9fa" (UID: "d96c12a3-6ce4-40f6-a655-0881d711f9fa"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.589259 4967 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/d96c12a3-6ce4-40f6-a655-0881d711f9fa-pod-info\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.589291 4967 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/d96c12a3-6ce4-40f6-a655-0881d711f9fa-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.589304 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d96c12a3-6ce4-40f6-a655-0881d711f9fa-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.589516 4967 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.589528 4967 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.589559 4967 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.589568 4967 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.589576 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f2q9d\" (UniqueName: \"kubernetes.io/projected/d96c12a3-6ce4-40f6-a655-0881d711f9fa-kube-api-access-f2q9d\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:45 crc 
kubenswrapper[4967]: I1121 16:00:45.589584 4967 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/d96c12a3-6ce4-40f6-a655-0881d711f9fa-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.589593 4967 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/d96c12a3-6ce4-40f6-a655-0881d711f9fa-server-conf\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.622733 4967 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.660541 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "d96c12a3-6ce4-40f6-a655-0881d711f9fa" (UID: "d96c12a3-6ce4-40f6-a655-0881d711f9fa"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.692115 4967 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.692409 4967 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/d96c12a3-6ce4-40f6-a655-0881d711f9fa-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.759829 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"d96c12a3-6ce4-40f6-a655-0881d711f9fa","Type":"ContainerDied","Data":"f08cacbda5650e3b23ba50b3bee4dbb84155050ad4c902ab7fc6bccb01daeece"} Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.759870 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.759926 4967 scope.go:117] "RemoveContainer" containerID="02d79e1d71d3c681e219adebcff3bc382e41de298e14be461f68c747348a0a41" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.761739 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"235ca898-447b-4df0-9aef-3bf2bc1719ce","Type":"ContainerStarted","Data":"3176e215c2fb9138ad8770ceb97c50957fa6a3851b24f5084bdc9850ee619e16"} Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.801418 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.802560 4967 scope.go:117] "RemoveContainer" containerID="80b38e9eacd01d13b36f4476fb0cc8a6b6b79cf32296b5aa4a7ba23654c8c79c" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.815959 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.831695 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 16:00:45 crc kubenswrapper[4967]: E1121 16:00:45.832369 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d96c12a3-6ce4-40f6-a655-0881d711f9fa" containerName="rabbitmq" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.832391 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="d96c12a3-6ce4-40f6-a655-0881d711f9fa" containerName="rabbitmq" Nov 21 16:00:45 crc kubenswrapper[4967]: E1121 16:00:45.832415 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d96c12a3-6ce4-40f6-a655-0881d711f9fa" containerName="setup-container" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.832423 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="d96c12a3-6ce4-40f6-a655-0881d711f9fa" containerName="setup-container" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.832935 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="d96c12a3-6ce4-40f6-a655-0881d711f9fa" containerName="rabbitmq" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.834719 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.838826 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.838890 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-t9gdg" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.839108 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.839186 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.839281 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.839530 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.844988 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.845975 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.896537 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2d6d9318-48b9-4b12-9532-2c449dd948a6-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.896618 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkcxf\" (UniqueName: \"kubernetes.io/projected/2d6d9318-48b9-4b12-9532-2c449dd948a6-kube-api-access-dkcxf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.896640 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2d6d9318-48b9-4b12-9532-2c449dd948a6-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.896674 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2d6d9318-48b9-4b12-9532-2c449dd948a6-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.896706 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.896759 4967 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2d6d9318-48b9-4b12-9532-2c449dd948a6-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.896793 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2d6d9318-48b9-4b12-9532-2c449dd948a6-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.896831 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2d6d9318-48b9-4b12-9532-2c449dd948a6-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.896868 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2d6d9318-48b9-4b12-9532-2c449dd948a6-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.896899 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2d6d9318-48b9-4b12-9532-2c449dd948a6-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.896931 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2d6d9318-48b9-4b12-9532-2c449dd948a6-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.999044 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2d6d9318-48b9-4b12-9532-2c449dd948a6-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.999298 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2d6d9318-48b9-4b12-9532-2c449dd948a6-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.999431 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2d6d9318-48b9-4b12-9532-2c449dd948a6-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.999536 4967 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2d6d9318-48b9-4b12-9532-2c449dd948a6-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.999619 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2d6d9318-48b9-4b12-9532-2c449dd948a6-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:45 crc kubenswrapper[4967]: I1121 16:00:45.999892 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2d6d9318-48b9-4b12-9532-2c449dd948a6-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.000056 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkcxf\" (UniqueName: \"kubernetes.io/projected/2d6d9318-48b9-4b12-9532-2c449dd948a6-kube-api-access-dkcxf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.000147 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2d6d9318-48b9-4b12-9532-2c449dd948a6-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.000236 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2d6d9318-48b9-4b12-9532-2c449dd948a6-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.000344 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.000474 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2d6d9318-48b9-4b12-9532-2c449dd948a6-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.000568 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2d6d9318-48b9-4b12-9532-2c449dd948a6-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.001288 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2d6d9318-48b9-4b12-9532-2c449dd948a6-config-data\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.001341 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2d6d9318-48b9-4b12-9532-2c449dd948a6-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.000091 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2d6d9318-48b9-4b12-9532-2c449dd948a6-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.001469 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.002154 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2d6d9318-48b9-4b12-9532-2c449dd948a6-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.005440 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2d6d9318-48b9-4b12-9532-2c449dd948a6-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.006352 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2d6d9318-48b9-4b12-9532-2c449dd948a6-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.006986 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2d6d9318-48b9-4b12-9532-2c449dd948a6-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.008888 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2d6d9318-48b9-4b12-9532-2c449dd948a6-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.022911 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkcxf\" (UniqueName: \"kubernetes.io/projected/2d6d9318-48b9-4b12-9532-2c449dd948a6-kube-api-access-dkcxf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.048781 4967 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"2d6d9318-48b9-4b12-9532-2c449dd948a6\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.157259 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.558798 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d96c12a3-6ce4-40f6-a655-0881d711f9fa" path="/var/lib/kubelet/pods/d96c12a3-6ce4-40f6-a655-0881d711f9fa/volumes" Nov 21 16:00:46 crc kubenswrapper[4967]: W1121 16:00:46.646335 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2d6d9318_48b9_4b12_9532_2c449dd948a6.slice/crio-3c3a8ba9167d11debd6aaf51b26a74f2b123934186c8459eaf5f8f918279ee26 WatchSource:0}: Error finding container 3c3a8ba9167d11debd6aaf51b26a74f2b123934186c8459eaf5f8f918279ee26: Status 404 returned error can't find the container with id 3c3a8ba9167d11debd6aaf51b26a74f2b123934186c8459eaf5f8f918279ee26 Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.656664 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 16:00:46 crc kubenswrapper[4967]: I1121 16:00:46.778780 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2d6d9318-48b9-4b12-9532-2c449dd948a6","Type":"ContainerStarted","Data":"3c3a8ba9167d11debd6aaf51b26a74f2b123934186c8459eaf5f8f918279ee26"} Nov 21 16:00:47 crc kubenswrapper[4967]: I1121 16:00:47.789546 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"235ca898-447b-4df0-9aef-3bf2bc1719ce","Type":"ContainerStarted","Data":"6e2a1f0b1c9eb5d66883bef8afd584f902625af39ff05c45d00207996ebb681d"} Nov 21 16:00:47 crc kubenswrapper[4967]: I1121 16:00:47.999125 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-gnv7w"] Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.002182 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.006916 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.027042 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-gnv7w"] Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.055287 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-ovsdbserver-nb\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.055364 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9z5d2\" (UniqueName: \"kubernetes.io/projected/66456da0-1dc2-4eb8-8608-cb314c66a628-kube-api-access-9z5d2\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.055530 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-config\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.055617 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-dns-svc\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.055679 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-openstack-edpm-ipam\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.055855 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-ovsdbserver-sb\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.056034 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-dns-swift-storage-0\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.157710 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-config\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: 
\"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.157783 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-dns-svc\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.157811 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-openstack-edpm-ipam\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.157837 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-ovsdbserver-sb\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.157902 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-dns-swift-storage-0\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.157982 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-ovsdbserver-nb\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.157998 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9z5d2\" (UniqueName: \"kubernetes.io/projected/66456da0-1dc2-4eb8-8608-cb314c66a628-kube-api-access-9z5d2\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.159198 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-config\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.159761 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-dns-svc\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.160393 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-ovsdbserver-sb\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " 
pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.161091 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-ovsdbserver-nb\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.161268 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-openstack-edpm-ipam\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.162278 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-dns-swift-storage-0\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.192016 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9z5d2\" (UniqueName: \"kubernetes.io/projected/66456da0-1dc2-4eb8-8608-cb314c66a628-kube-api-access-9z5d2\") pod \"dnsmasq-dns-5b75489c6f-gnv7w\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.366812 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.801438 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2d6d9318-48b9-4b12-9532-2c449dd948a6","Type":"ContainerStarted","Data":"e6352c76ce78558116e56c9bef48fa3af58a7e1f7f5a94bee6397c427ccc8ba1"} Nov 21 16:00:48 crc kubenswrapper[4967]: I1121 16:00:48.926003 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-gnv7w"] Nov 21 16:00:48 crc kubenswrapper[4967]: W1121 16:00:48.926144 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod66456da0_1dc2_4eb8_8608_cb314c66a628.slice/crio-d34251f072f128f86bef804e488f57cb3fa6840b128c3c44cbda9ff980fbe5cb WatchSource:0}: Error finding container d34251f072f128f86bef804e488f57cb3fa6840b128c3c44cbda9ff980fbe5cb: Status 404 returned error can't find the container with id d34251f072f128f86bef804e488f57cb3fa6840b128c3c44cbda9ff980fbe5cb Nov 21 16:00:49 crc kubenswrapper[4967]: I1121 16:00:49.812620 4967 generic.go:334] "Generic (PLEG): container finished" podID="66456da0-1dc2-4eb8-8608-cb314c66a628" containerID="baa55a1cec753f2484ff5c656c4e75853c9bdccd7df355ee398cbac04fc91bcd" exitCode=0 Nov 21 16:00:49 crc kubenswrapper[4967]: I1121 16:00:49.812770 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" event={"ID":"66456da0-1dc2-4eb8-8608-cb314c66a628","Type":"ContainerDied","Data":"baa55a1cec753f2484ff5c656c4e75853c9bdccd7df355ee398cbac04fc91bcd"} Nov 21 16:00:49 crc kubenswrapper[4967]: I1121 16:00:49.813073 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" 
event={"ID":"66456da0-1dc2-4eb8-8608-cb314c66a628","Type":"ContainerStarted","Data":"d34251f072f128f86bef804e488f57cb3fa6840b128c3c44cbda9ff980fbe5cb"} Nov 21 16:00:50 crc kubenswrapper[4967]: I1121 16:00:50.336594 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="d96c12a3-6ce4-40f6-a655-0881d711f9fa" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.131:5671: i/o timeout" Nov 21 16:00:50 crc kubenswrapper[4967]: I1121 16:00:50.826442 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" event={"ID":"66456da0-1dc2-4eb8-8608-cb314c66a628","Type":"ContainerStarted","Data":"313c86f630d54969376ba4975f9f0b69acfb425f680af3015c80776f359433e0"} Nov 21 16:00:50 crc kubenswrapper[4967]: I1121 16:00:50.826643 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:50 crc kubenswrapper[4967]: I1121 16:00:50.854523 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" podStartSLOduration=3.854503067 podStartE2EDuration="3.854503067s" podCreationTimestamp="2025-11-21 16:00:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 16:00:50.845826372 +0000 UTC m=+1539.104347390" watchObservedRunningTime="2025-11-21 16:00:50.854503067 +0000 UTC m=+1539.113024075" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.369850 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.445686 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-kfxx4"] Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.445951 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4" podUID="654b54a8-f5b2-480d-806c-b1c9a8b51e21" containerName="dnsmasq-dns" containerID="cri-o://63efc3464f37c63a3f2107684800694adc4f776ce71758995f8382d7e457df3e" gracePeriod=10 Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.622557 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5d75f767dc-9kxh6"] Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.626246 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.647996 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d75f767dc-9kxh6"] Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.820208 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c276d90d-6f12-4909-9b88-cb881f3f8b74-config\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.820282 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c276d90d-6f12-4909-9b88-cb881f3f8b74-dns-svc\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.820346 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c276d90d-6f12-4909-9b88-cb881f3f8b74-ovsdbserver-nb\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.820661 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c276d90d-6f12-4909-9b88-cb881f3f8b74-dns-swift-storage-0\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.820805 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w849q\" (UniqueName: \"kubernetes.io/projected/c276d90d-6f12-4909-9b88-cb881f3f8b74-kube-api-access-w849q\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.821218 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c276d90d-6f12-4909-9b88-cb881f3f8b74-ovsdbserver-sb\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.821501 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c276d90d-6f12-4909-9b88-cb881f3f8b74-openstack-edpm-ipam\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.917136 4967 generic.go:334] "Generic (PLEG): container finished" podID="654b54a8-f5b2-480d-806c-b1c9a8b51e21" containerID="63efc3464f37c63a3f2107684800694adc4f776ce71758995f8382d7e457df3e" exitCode=0 Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.917172 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4" 
event={"ID":"654b54a8-f5b2-480d-806c-b1c9a8b51e21","Type":"ContainerDied","Data":"63efc3464f37c63a3f2107684800694adc4f776ce71758995f8382d7e457df3e"} Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.924069 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c276d90d-6f12-4909-9b88-cb881f3f8b74-dns-swift-storage-0\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.924146 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w849q\" (UniqueName: \"kubernetes.io/projected/c276d90d-6f12-4909-9b88-cb881f3f8b74-kube-api-access-w849q\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.924259 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c276d90d-6f12-4909-9b88-cb881f3f8b74-ovsdbserver-sb\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.924351 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c276d90d-6f12-4909-9b88-cb881f3f8b74-openstack-edpm-ipam\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.924442 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c276d90d-6f12-4909-9b88-cb881f3f8b74-config\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.924483 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c276d90d-6f12-4909-9b88-cb881f3f8b74-dns-svc\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.924523 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c276d90d-6f12-4909-9b88-cb881f3f8b74-ovsdbserver-nb\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.927141 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c276d90d-6f12-4909-9b88-cb881f3f8b74-ovsdbserver-nb\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.927329 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c276d90d-6f12-4909-9b88-cb881f3f8b74-dns-swift-storage-0\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" 
(UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.927345 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c276d90d-6f12-4909-9b88-cb881f3f8b74-ovsdbserver-sb\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.927415 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c276d90d-6f12-4909-9b88-cb881f3f8b74-openstack-edpm-ipam\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.927451 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c276d90d-6f12-4909-9b88-cb881f3f8b74-config\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.927508 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c276d90d-6f12-4909-9b88-cb881f3f8b74-dns-svc\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.978717 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w849q\" (UniqueName: \"kubernetes.io/projected/c276d90d-6f12-4909-9b88-cb881f3f8b74-kube-api-access-w849q\") pod \"dnsmasq-dns-5d75f767dc-9kxh6\" (UID: \"c276d90d-6f12-4909-9b88-cb881f3f8b74\") " pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:58 crc kubenswrapper[4967]: I1121 16:00:58.998108 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.229706 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4" Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.234941 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-dns-swift-storage-0\") pod \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.235066 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-ovsdbserver-sb\") pod \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.235213 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-dns-svc\") pod \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.235287 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kwxx6\" (UniqueName: \"kubernetes.io/projected/654b54a8-f5b2-480d-806c-b1c9a8b51e21-kube-api-access-kwxx6\") pod \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.235326 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-config\") pod \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.235352 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-ovsdbserver-nb\") pod \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\" (UID: \"654b54a8-f5b2-480d-806c-b1c9a8b51e21\") " Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.242151 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/654b54a8-f5b2-480d-806c-b1c9a8b51e21-kube-api-access-kwxx6" (OuterVolumeSpecName: "kube-api-access-kwxx6") pod "654b54a8-f5b2-480d-806c-b1c9a8b51e21" (UID: "654b54a8-f5b2-480d-806c-b1c9a8b51e21"). InnerVolumeSpecName "kube-api-access-kwxx6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.317210 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "654b54a8-f5b2-480d-806c-b1c9a8b51e21" (UID: "654b54a8-f5b2-480d-806c-b1c9a8b51e21"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.331763 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-config" (OuterVolumeSpecName: "config") pod "654b54a8-f5b2-480d-806c-b1c9a8b51e21" (UID: "654b54a8-f5b2-480d-806c-b1c9a8b51e21"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.332231 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "654b54a8-f5b2-480d-806c-b1c9a8b51e21" (UID: "654b54a8-f5b2-480d-806c-b1c9a8b51e21"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.339668 4967 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.339721 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-config\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.339733 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kwxx6\" (UniqueName: \"kubernetes.io/projected/654b54a8-f5b2-480d-806c-b1c9a8b51e21-kube-api-access-kwxx6\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.339743 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.347609 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "654b54a8-f5b2-480d-806c-b1c9a8b51e21" (UID: "654b54a8-f5b2-480d-806c-b1c9a8b51e21"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.349824 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "654b54a8-f5b2-480d-806c-b1c9a8b51e21" (UID: "654b54a8-f5b2-480d-806c-b1c9a8b51e21"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.441571 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.441613 4967 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/654b54a8-f5b2-480d-806c-b1c9a8b51e21-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.489611 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d75f767dc-9kxh6"] Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.929760 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4" event={"ID":"654b54a8-f5b2-480d-806c-b1c9a8b51e21","Type":"ContainerDied","Data":"3ca001a5094d6192d331ef1e12cd1cf06bdab5ebdbafb40fbae9403881d28a82"} Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.930093 4967 scope.go:117] "RemoveContainer" containerID="63efc3464f37c63a3f2107684800694adc4f776ce71758995f8382d7e457df3e" Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.930024 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f84f9ccf-kfxx4" Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.933705 4967 generic.go:334] "Generic (PLEG): container finished" podID="c276d90d-6f12-4909-9b88-cb881f3f8b74" containerID="62d92d564d06c3a5a1b8752771110ecd113e57c8189cb6931125fa171c81dbc7" exitCode=0 Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.933754 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" event={"ID":"c276d90d-6f12-4909-9b88-cb881f3f8b74","Type":"ContainerDied","Data":"62d92d564d06c3a5a1b8752771110ecd113e57c8189cb6931125fa171c81dbc7"} Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.933779 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" event={"ID":"c276d90d-6f12-4909-9b88-cb881f3f8b74","Type":"ContainerStarted","Data":"0e80cbb1b5149508a439691dfefdeb2cf21f87fe79c80240690acfac0d1845f4"} Nov 21 16:00:59 crc kubenswrapper[4967]: I1121 16:00:59.966726 4967 scope.go:117] "RemoveContainer" containerID="6f2a125e58fce7aa897e8fbb6ab534809b70f2dcc81e2ca34b405725c044324e" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.149686 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29395681-wbns7"] Nov 21 16:01:00 crc kubenswrapper[4967]: E1121 16:01:00.151793 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="654b54a8-f5b2-480d-806c-b1c9a8b51e21" containerName="dnsmasq-dns" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.151814 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="654b54a8-f5b2-480d-806c-b1c9a8b51e21" containerName="dnsmasq-dns" Nov 21 16:01:00 crc kubenswrapper[4967]: E1121 16:01:00.151847 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="654b54a8-f5b2-480d-806c-b1c9a8b51e21" containerName="init" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.151855 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="654b54a8-f5b2-480d-806c-b1c9a8b51e21" containerName="init" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.152092 4967 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="654b54a8-f5b2-480d-806c-b1c9a8b51e21" containerName="dnsmasq-dns" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.154253 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29395681-wbns7" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.165077 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29395681-wbns7"] Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.241669 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-kfxx4"] Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.254286 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-kfxx4"] Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.264376 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38e452a5-595e-4749-9310-48c09e18e32a-combined-ca-bundle\") pod \"keystone-cron-29395681-wbns7\" (UID: \"38e452a5-595e-4749-9310-48c09e18e32a\") " pod="openstack/keystone-cron-29395681-wbns7" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.264450 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/38e452a5-595e-4749-9310-48c09e18e32a-fernet-keys\") pod \"keystone-cron-29395681-wbns7\" (UID: \"38e452a5-595e-4749-9310-48c09e18e32a\") " pod="openstack/keystone-cron-29395681-wbns7" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.264613 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8wjv\" (UniqueName: \"kubernetes.io/projected/38e452a5-595e-4749-9310-48c09e18e32a-kube-api-access-f8wjv\") pod \"keystone-cron-29395681-wbns7\" (UID: \"38e452a5-595e-4749-9310-48c09e18e32a\") " pod="openstack/keystone-cron-29395681-wbns7" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.264840 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38e452a5-595e-4749-9310-48c09e18e32a-config-data\") pod \"keystone-cron-29395681-wbns7\" (UID: \"38e452a5-595e-4749-9310-48c09e18e32a\") " pod="openstack/keystone-cron-29395681-wbns7" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.366987 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38e452a5-595e-4749-9310-48c09e18e32a-combined-ca-bundle\") pod \"keystone-cron-29395681-wbns7\" (UID: \"38e452a5-595e-4749-9310-48c09e18e32a\") " pod="openstack/keystone-cron-29395681-wbns7" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.367029 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/38e452a5-595e-4749-9310-48c09e18e32a-fernet-keys\") pod \"keystone-cron-29395681-wbns7\" (UID: \"38e452a5-595e-4749-9310-48c09e18e32a\") " pod="openstack/keystone-cron-29395681-wbns7" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.367111 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8wjv\" (UniqueName: \"kubernetes.io/projected/38e452a5-595e-4749-9310-48c09e18e32a-kube-api-access-f8wjv\") pod \"keystone-cron-29395681-wbns7\" (UID: \"38e452a5-595e-4749-9310-48c09e18e32a\") " 
pod="openstack/keystone-cron-29395681-wbns7" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.367202 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38e452a5-595e-4749-9310-48c09e18e32a-config-data\") pod \"keystone-cron-29395681-wbns7\" (UID: \"38e452a5-595e-4749-9310-48c09e18e32a\") " pod="openstack/keystone-cron-29395681-wbns7" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.372275 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38e452a5-595e-4749-9310-48c09e18e32a-config-data\") pod \"keystone-cron-29395681-wbns7\" (UID: \"38e452a5-595e-4749-9310-48c09e18e32a\") " pod="openstack/keystone-cron-29395681-wbns7" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.372327 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/38e452a5-595e-4749-9310-48c09e18e32a-fernet-keys\") pod \"keystone-cron-29395681-wbns7\" (UID: \"38e452a5-595e-4749-9310-48c09e18e32a\") " pod="openstack/keystone-cron-29395681-wbns7" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.376033 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38e452a5-595e-4749-9310-48c09e18e32a-combined-ca-bundle\") pod \"keystone-cron-29395681-wbns7\" (UID: \"38e452a5-595e-4749-9310-48c09e18e32a\") " pod="openstack/keystone-cron-29395681-wbns7" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.393106 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8wjv\" (UniqueName: \"kubernetes.io/projected/38e452a5-595e-4749-9310-48c09e18e32a-kube-api-access-f8wjv\") pod \"keystone-cron-29395681-wbns7\" (UID: \"38e452a5-595e-4749-9310-48c09e18e32a\") " pod="openstack/keystone-cron-29395681-wbns7" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.550092 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="654b54a8-f5b2-480d-806c-b1c9a8b51e21" path="/var/lib/kubelet/pods/654b54a8-f5b2-480d-806c-b1c9a8b51e21/volumes" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.550193 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29395681-wbns7" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.949340 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" event={"ID":"c276d90d-6f12-4909-9b88-cb881f3f8b74","Type":"ContainerStarted","Data":"60af506e26ffa595b6b9705b92917d1c60b3ce7116cbc00b71dd0721e920fbe1"} Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.949674 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:01:00 crc kubenswrapper[4967]: I1121 16:01:00.975187 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" podStartSLOduration=2.975165451 podStartE2EDuration="2.975165451s" podCreationTimestamp="2025-11-21 16:00:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 16:01:00.970071202 +0000 UTC m=+1549.228592230" watchObservedRunningTime="2025-11-21 16:01:00.975165451 +0000 UTC m=+1549.233686469" Nov 21 16:01:01 crc kubenswrapper[4967]: I1121 16:01:01.026656 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29395681-wbns7"] Nov 21 16:01:01 crc kubenswrapper[4967]: W1121 16:01:01.028551 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod38e452a5_595e_4749_9310_48c09e18e32a.slice/crio-8bd8e0577c50a9a2f1c69299126d3e0d810708dbd90665be784a86ce4ceea5d8 WatchSource:0}: Error finding container 8bd8e0577c50a9a2f1c69299126d3e0d810708dbd90665be784a86ce4ceea5d8: Status 404 returned error can't find the container with id 8bd8e0577c50a9a2f1c69299126d3e0d810708dbd90665be784a86ce4ceea5d8 Nov 21 16:01:01 crc kubenswrapper[4967]: I1121 16:01:01.962710 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395681-wbns7" event={"ID":"38e452a5-595e-4749-9310-48c09e18e32a","Type":"ContainerStarted","Data":"8a652e59b35c314e14eff7434e4924e47c4d8fe53cb6ff5eb64a3c8fdb9a4477"} Nov 21 16:01:01 crc kubenswrapper[4967]: I1121 16:01:01.963287 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395681-wbns7" event={"ID":"38e452a5-595e-4749-9310-48c09e18e32a","Type":"ContainerStarted","Data":"8bd8e0577c50a9a2f1c69299126d3e0d810708dbd90665be784a86ce4ceea5d8"} Nov 21 16:01:01 crc kubenswrapper[4967]: I1121 16:01:01.982503 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29395681-wbns7" podStartSLOduration=1.982485493 podStartE2EDuration="1.982485493s" podCreationTimestamp="2025-11-21 16:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 16:01:01.978370722 +0000 UTC m=+1550.236891730" watchObservedRunningTime="2025-11-21 16:01:01.982485493 +0000 UTC m=+1550.241006501" Nov 21 16:01:04 crc kubenswrapper[4967]: I1121 16:01:04.995865 4967 generic.go:334] "Generic (PLEG): container finished" podID="38e452a5-595e-4749-9310-48c09e18e32a" containerID="8a652e59b35c314e14eff7434e4924e47c4d8fe53cb6ff5eb64a3c8fdb9a4477" exitCode=0 Nov 21 16:01:04 crc kubenswrapper[4967]: I1121 16:01:04.995968 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395681-wbns7" 
event={"ID":"38e452a5-595e-4749-9310-48c09e18e32a","Type":"ContainerDied","Data":"8a652e59b35c314e14eff7434e4924e47c4d8fe53cb6ff5eb64a3c8fdb9a4477"} Nov 21 16:01:06 crc kubenswrapper[4967]: I1121 16:01:06.413052 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29395681-wbns7" Nov 21 16:01:06 crc kubenswrapper[4967]: I1121 16:01:06.520223 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8wjv\" (UniqueName: \"kubernetes.io/projected/38e452a5-595e-4749-9310-48c09e18e32a-kube-api-access-f8wjv\") pod \"38e452a5-595e-4749-9310-48c09e18e32a\" (UID: \"38e452a5-595e-4749-9310-48c09e18e32a\") " Nov 21 16:01:06 crc kubenswrapper[4967]: I1121 16:01:06.520729 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38e452a5-595e-4749-9310-48c09e18e32a-config-data\") pod \"38e452a5-595e-4749-9310-48c09e18e32a\" (UID: \"38e452a5-595e-4749-9310-48c09e18e32a\") " Nov 21 16:01:06 crc kubenswrapper[4967]: I1121 16:01:06.520817 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38e452a5-595e-4749-9310-48c09e18e32a-combined-ca-bundle\") pod \"38e452a5-595e-4749-9310-48c09e18e32a\" (UID: \"38e452a5-595e-4749-9310-48c09e18e32a\") " Nov 21 16:01:06 crc kubenswrapper[4967]: I1121 16:01:06.520849 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/38e452a5-595e-4749-9310-48c09e18e32a-fernet-keys\") pod \"38e452a5-595e-4749-9310-48c09e18e32a\" (UID: \"38e452a5-595e-4749-9310-48c09e18e32a\") " Nov 21 16:01:06 crc kubenswrapper[4967]: I1121 16:01:06.526771 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38e452a5-595e-4749-9310-48c09e18e32a-kube-api-access-f8wjv" (OuterVolumeSpecName: "kube-api-access-f8wjv") pod "38e452a5-595e-4749-9310-48c09e18e32a" (UID: "38e452a5-595e-4749-9310-48c09e18e32a"). InnerVolumeSpecName "kube-api-access-f8wjv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:01:06 crc kubenswrapper[4967]: I1121 16:01:06.528010 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38e452a5-595e-4749-9310-48c09e18e32a-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "38e452a5-595e-4749-9310-48c09e18e32a" (UID: "38e452a5-595e-4749-9310-48c09e18e32a"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:01:06 crc kubenswrapper[4967]: I1121 16:01:06.562248 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38e452a5-595e-4749-9310-48c09e18e32a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "38e452a5-595e-4749-9310-48c09e18e32a" (UID: "38e452a5-595e-4749-9310-48c09e18e32a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:01:06 crc kubenswrapper[4967]: I1121 16:01:06.587680 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38e452a5-595e-4749-9310-48c09e18e32a-config-data" (OuterVolumeSpecName: "config-data") pod "38e452a5-595e-4749-9310-48c09e18e32a" (UID: "38e452a5-595e-4749-9310-48c09e18e32a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:01:06 crc kubenswrapper[4967]: I1121 16:01:06.624612 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38e452a5-595e-4749-9310-48c09e18e32a-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 16:01:06 crc kubenswrapper[4967]: I1121 16:01:06.624657 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38e452a5-595e-4749-9310-48c09e18e32a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 16:01:06 crc kubenswrapper[4967]: I1121 16:01:06.624672 4967 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/38e452a5-595e-4749-9310-48c09e18e32a-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 21 16:01:06 crc kubenswrapper[4967]: I1121 16:01:06.624684 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8wjv\" (UniqueName: \"kubernetes.io/projected/38e452a5-595e-4749-9310-48c09e18e32a-kube-api-access-f8wjv\") on node \"crc\" DevicePath \"\"" Nov 21 16:01:07 crc kubenswrapper[4967]: I1121 16:01:07.021658 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395681-wbns7" event={"ID":"38e452a5-595e-4749-9310-48c09e18e32a","Type":"ContainerDied","Data":"8bd8e0577c50a9a2f1c69299126d3e0d810708dbd90665be784a86ce4ceea5d8"} Nov 21 16:01:07 crc kubenswrapper[4967]: I1121 16:01:07.021713 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29395681-wbns7" Nov 21 16:01:07 crc kubenswrapper[4967]: I1121 16:01:07.022008 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8bd8e0577c50a9a2f1c69299126d3e0d810708dbd90665be784a86ce4ceea5d8" Nov 21 16:01:08 crc kubenswrapper[4967]: I1121 16:01:08.999223 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5d75f767dc-9kxh6" Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.073907 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-gnv7w"] Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.074226 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" podUID="66456da0-1dc2-4eb8-8608-cb314c66a628" containerName="dnsmasq-dns" containerID="cri-o://313c86f630d54969376ba4975f9f0b69acfb425f680af3015c80776f359433e0" gracePeriod=10 Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.670091 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.693070 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-config\") pod \"66456da0-1dc2-4eb8-8608-cb314c66a628\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.693169 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9z5d2\" (UniqueName: \"kubernetes.io/projected/66456da0-1dc2-4eb8-8608-cb314c66a628-kube-api-access-9z5d2\") pod \"66456da0-1dc2-4eb8-8608-cb314c66a628\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.693233 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-dns-swift-storage-0\") pod \"66456da0-1dc2-4eb8-8608-cb314c66a628\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.693268 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-dns-svc\") pod \"66456da0-1dc2-4eb8-8608-cb314c66a628\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.693292 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-ovsdbserver-nb\") pod \"66456da0-1dc2-4eb8-8608-cb314c66a628\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.693380 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-ovsdbserver-sb\") pod \"66456da0-1dc2-4eb8-8608-cb314c66a628\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.694218 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-openstack-edpm-ipam\") pod \"66456da0-1dc2-4eb8-8608-cb314c66a628\" (UID: \"66456da0-1dc2-4eb8-8608-cb314c66a628\") " Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.704764 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66456da0-1dc2-4eb8-8608-cb314c66a628-kube-api-access-9z5d2" (OuterVolumeSpecName: "kube-api-access-9z5d2") pod "66456da0-1dc2-4eb8-8608-cb314c66a628" (UID: "66456da0-1dc2-4eb8-8608-cb314c66a628"). InnerVolumeSpecName "kube-api-access-9z5d2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.774090 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "66456da0-1dc2-4eb8-8608-cb314c66a628" (UID: "66456da0-1dc2-4eb8-8608-cb314c66a628"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.775806 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "66456da0-1dc2-4eb8-8608-cb314c66a628" (UID: "66456da0-1dc2-4eb8-8608-cb314c66a628"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.781485 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "66456da0-1dc2-4eb8-8608-cb314c66a628" (UID: "66456da0-1dc2-4eb8-8608-cb314c66a628"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.791072 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "66456da0-1dc2-4eb8-8608-cb314c66a628" (UID: "66456da0-1dc2-4eb8-8608-cb314c66a628"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.801529 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "66456da0-1dc2-4eb8-8608-cb314c66a628" (UID: "66456da0-1dc2-4eb8-8608-cb314c66a628"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.802896 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-config" (OuterVolumeSpecName: "config") pod "66456da0-1dc2-4eb8-8608-cb314c66a628" (UID: "66456da0-1dc2-4eb8-8608-cb314c66a628"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.805373 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9z5d2\" (UniqueName: \"kubernetes.io/projected/66456da0-1dc2-4eb8-8608-cb314c66a628-kube-api-access-9z5d2\") on node \"crc\" DevicePath \"\"" Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.805416 4967 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.805431 4967 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.805442 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.805450 4967 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.805460 4967 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 21 16:01:09 crc kubenswrapper[4967]: I1121 16:01:09.805468 4967 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66456da0-1dc2-4eb8-8608-cb314c66a628-config\") on node \"crc\" DevicePath \"\"" Nov 21 16:01:10 crc kubenswrapper[4967]: I1121 16:01:10.052514 4967 generic.go:334] "Generic (PLEG): container finished" podID="66456da0-1dc2-4eb8-8608-cb314c66a628" containerID="313c86f630d54969376ba4975f9f0b69acfb425f680af3015c80776f359433e0" exitCode=0 Nov 21 16:01:10 crc kubenswrapper[4967]: I1121 16:01:10.052557 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" event={"ID":"66456da0-1dc2-4eb8-8608-cb314c66a628","Type":"ContainerDied","Data":"313c86f630d54969376ba4975f9f0b69acfb425f680af3015c80776f359433e0"} Nov 21 16:01:10 crc kubenswrapper[4967]: I1121 16:01:10.052583 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" event={"ID":"66456da0-1dc2-4eb8-8608-cb314c66a628","Type":"ContainerDied","Data":"d34251f072f128f86bef804e488f57cb3fa6840b128c3c44cbda9ff980fbe5cb"} Nov 21 16:01:10 crc kubenswrapper[4967]: I1121 16:01:10.052600 4967 scope.go:117] "RemoveContainer" containerID="313c86f630d54969376ba4975f9f0b69acfb425f680af3015c80776f359433e0" Nov 21 16:01:10 crc kubenswrapper[4967]: I1121 16:01:10.052638 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b75489c6f-gnv7w" Nov 21 16:01:10 crc kubenswrapper[4967]: I1121 16:01:10.078487 4967 scope.go:117] "RemoveContainer" containerID="baa55a1cec753f2484ff5c656c4e75853c9bdccd7df355ee398cbac04fc91bcd" Nov 21 16:01:10 crc kubenswrapper[4967]: I1121 16:01:10.086779 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-gnv7w"] Nov 21 16:01:10 crc kubenswrapper[4967]: I1121 16:01:10.099374 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-gnv7w"] Nov 21 16:01:10 crc kubenswrapper[4967]: I1121 16:01:10.109006 4967 scope.go:117] "RemoveContainer" containerID="313c86f630d54969376ba4975f9f0b69acfb425f680af3015c80776f359433e0" Nov 21 16:01:10 crc kubenswrapper[4967]: E1121 16:01:10.109605 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"313c86f630d54969376ba4975f9f0b69acfb425f680af3015c80776f359433e0\": container with ID starting with 313c86f630d54969376ba4975f9f0b69acfb425f680af3015c80776f359433e0 not found: ID does not exist" containerID="313c86f630d54969376ba4975f9f0b69acfb425f680af3015c80776f359433e0" Nov 21 16:01:10 crc kubenswrapper[4967]: I1121 16:01:10.109751 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"313c86f630d54969376ba4975f9f0b69acfb425f680af3015c80776f359433e0"} err="failed to get container status \"313c86f630d54969376ba4975f9f0b69acfb425f680af3015c80776f359433e0\": rpc error: code = NotFound desc = could not find container \"313c86f630d54969376ba4975f9f0b69acfb425f680af3015c80776f359433e0\": container with ID starting with 313c86f630d54969376ba4975f9f0b69acfb425f680af3015c80776f359433e0 not found: ID does not exist" Nov 21 16:01:10 crc kubenswrapper[4967]: I1121 16:01:10.109882 4967 scope.go:117] "RemoveContainer" containerID="baa55a1cec753f2484ff5c656c4e75853c9bdccd7df355ee398cbac04fc91bcd" Nov 21 16:01:10 crc kubenswrapper[4967]: E1121 16:01:10.110736 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"baa55a1cec753f2484ff5c656c4e75853c9bdccd7df355ee398cbac04fc91bcd\": container with ID starting with baa55a1cec753f2484ff5c656c4e75853c9bdccd7df355ee398cbac04fc91bcd not found: ID does not exist" containerID="baa55a1cec753f2484ff5c656c4e75853c9bdccd7df355ee398cbac04fc91bcd" Nov 21 16:01:10 crc kubenswrapper[4967]: I1121 16:01:10.110852 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"baa55a1cec753f2484ff5c656c4e75853c9bdccd7df355ee398cbac04fc91bcd"} err="failed to get container status \"baa55a1cec753f2484ff5c656c4e75853c9bdccd7df355ee398cbac04fc91bcd\": rpc error: code = NotFound desc = could not find container \"baa55a1cec753f2484ff5c656c4e75853c9bdccd7df355ee398cbac04fc91bcd\": container with ID starting with baa55a1cec753f2484ff5c656c4e75853c9bdccd7df355ee398cbac04fc91bcd not found: ID does not exist" Nov 21 16:01:10 crc kubenswrapper[4967]: I1121 16:01:10.551140 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66456da0-1dc2-4eb8-8608-cb314c66a628" path="/var/lib/kubelet/pods/66456da0-1dc2-4eb8-8608-cb314c66a628/volumes" Nov 21 16:01:16 crc kubenswrapper[4967]: I1121 16:01:16.522091 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:01:16 crc kubenswrapper[4967]: I1121 16:01:16.522683 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:01:19 crc kubenswrapper[4967]: I1121 16:01:19.428991 4967 scope.go:117] "RemoveContainer" containerID="f1740d83b1d7b6bd9ceb629dfc0ce7360522feb97d85fc0b3f71e0479c90ecb9" Nov 21 16:01:20 crc kubenswrapper[4967]: I1121 16:01:20.162788 4967 generic.go:334] "Generic (PLEG): container finished" podID="235ca898-447b-4df0-9aef-3bf2bc1719ce" containerID="6e2a1f0b1c9eb5d66883bef8afd584f902625af39ff05c45d00207996ebb681d" exitCode=0 Nov 21 16:01:20 crc kubenswrapper[4967]: I1121 16:01:20.162840 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"235ca898-447b-4df0-9aef-3bf2bc1719ce","Type":"ContainerDied","Data":"6e2a1f0b1c9eb5d66883bef8afd584f902625af39ff05c45d00207996ebb681d"} Nov 21 16:01:21 crc kubenswrapper[4967]: I1121 16:01:21.175150 4967 generic.go:334] "Generic (PLEG): container finished" podID="2d6d9318-48b9-4b12-9532-2c449dd948a6" containerID="e6352c76ce78558116e56c9bef48fa3af58a7e1f7f5a94bee6397c427ccc8ba1" exitCode=0 Nov 21 16:01:21 crc kubenswrapper[4967]: I1121 16:01:21.175240 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2d6d9318-48b9-4b12-9532-2c449dd948a6","Type":"ContainerDied","Data":"e6352c76ce78558116e56c9bef48fa3af58a7e1f7f5a94bee6397c427ccc8ba1"} Nov 21 16:01:21 crc kubenswrapper[4967]: I1121 16:01:21.177897 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"235ca898-447b-4df0-9aef-3bf2bc1719ce","Type":"ContainerStarted","Data":"deec553d6d03090f47ee1dd37cabbb9eb2f39aa0f06e0571e101389f75372e55"} Nov 21 16:01:21 crc kubenswrapper[4967]: I1121 16:01:21.178088 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 21 16:01:21 crc kubenswrapper[4967]: I1121 16:01:21.236113 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.236088776 podStartE2EDuration="37.236088776s" podCreationTimestamp="2025-11-21 16:00:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 16:01:21.227148413 +0000 UTC m=+1569.485669411" watchObservedRunningTime="2025-11-21 16:01:21.236088776 +0000 UTC m=+1569.494609784" Nov 21 16:01:22 crc kubenswrapper[4967]: I1121 16:01:22.189556 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2d6d9318-48b9-4b12-9532-2c449dd948a6","Type":"ContainerStarted","Data":"8f14280f6b458a1ea71ad853d904a7ef2f36a17383194b86543321e367b505a5"} Nov 21 16:01:22 crc kubenswrapper[4967]: I1121 16:01:22.190237 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:01:22 crc kubenswrapper[4967]: I1121 16:01:22.225740 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.225719727 podStartE2EDuration="37.225719727s" 
podCreationTimestamp="2025-11-21 16:00:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 16:01:22.211453857 +0000 UTC m=+1570.469974875" watchObservedRunningTime="2025-11-21 16:01:22.225719727 +0000 UTC m=+1570.484240745" Nov 21 16:01:23 crc kubenswrapper[4967]: I1121 16:01:23.874625 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r"] Nov 21 16:01:23 crc kubenswrapper[4967]: E1121 16:01:23.876715 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66456da0-1dc2-4eb8-8608-cb314c66a628" containerName="dnsmasq-dns" Nov 21 16:01:23 crc kubenswrapper[4967]: I1121 16:01:23.876744 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="66456da0-1dc2-4eb8-8608-cb314c66a628" containerName="dnsmasq-dns" Nov 21 16:01:23 crc kubenswrapper[4967]: E1121 16:01:23.876760 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38e452a5-595e-4749-9310-48c09e18e32a" containerName="keystone-cron" Nov 21 16:01:23 crc kubenswrapper[4967]: I1121 16:01:23.876769 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="38e452a5-595e-4749-9310-48c09e18e32a" containerName="keystone-cron" Nov 21 16:01:23 crc kubenswrapper[4967]: E1121 16:01:23.876812 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66456da0-1dc2-4eb8-8608-cb314c66a628" containerName="init" Nov 21 16:01:23 crc kubenswrapper[4967]: I1121 16:01:23.876821 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="66456da0-1dc2-4eb8-8608-cb314c66a628" containerName="init" Nov 21 16:01:23 crc kubenswrapper[4967]: I1121 16:01:23.877097 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="66456da0-1dc2-4eb8-8608-cb314c66a628" containerName="dnsmasq-dns" Nov 21 16:01:23 crc kubenswrapper[4967]: I1121 16:01:23.877135 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="38e452a5-595e-4749-9310-48c09e18e32a" containerName="keystone-cron" Nov 21 16:01:23 crc kubenswrapper[4967]: I1121 16:01:23.878063 4967 util.go:30] "No sandbox for pod can be found. 
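
The podStartSLOduration / podStartE2EDuration figures above are plain timestamp arithmetic: with no image pulls recorded (the zeroed firstStartedPulling/lastFinishedPulling), the 37.236088776s for rabbitmq-server-0 is just watchObservedRunningTime minus podCreationTimestamp. A short check that reproduces the number; the layout string matches the timestamps exactly as printed in these entries.

    package main

    import (
        "fmt"
        "time"
    )

    // layout matches timestamps like "2025-11-21 16:01:21.236088776 +0000 UTC".
    const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

    func main() {
        created, _ := time.Parse(layout, "2025-11-21 16:00:44 +0000 UTC")
        running, _ := time.Parse(layout, "2025-11-21 16:01:21.236088776 +0000 UTC")
        // Prints 37.236088776s, matching podStartE2EDuration above.
        fmt.Println(running.Sub(created))
    }
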
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" Nov 21 16:01:23 crc kubenswrapper[4967]: I1121 16:01:23.881099 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rn5c5" Nov 21 16:01:23 crc kubenswrapper[4967]: I1121 16:01:23.881439 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 16:01:23 crc kubenswrapper[4967]: I1121 16:01:23.881762 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 16:01:23 crc kubenswrapper[4967]: I1121 16:01:23.881954 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 16:01:23 crc kubenswrapper[4967]: I1121 16:01:23.906903 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r"] Nov 21 16:01:24 crc kubenswrapper[4967]: I1121 16:01:24.046087 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc803d7b-a544-4388-b5c0-3debc0789e8e-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r\" (UID: \"cc803d7b-a544-4388-b5c0-3debc0789e8e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" Nov 21 16:01:24 crc kubenswrapper[4967]: I1121 16:01:24.046516 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7h7vf\" (UniqueName: \"kubernetes.io/projected/cc803d7b-a544-4388-b5c0-3debc0789e8e-kube-api-access-7h7vf\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r\" (UID: \"cc803d7b-a544-4388-b5c0-3debc0789e8e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" Nov 21 16:01:24 crc kubenswrapper[4967]: I1121 16:01:24.046910 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc803d7b-a544-4388-b5c0-3debc0789e8e-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r\" (UID: \"cc803d7b-a544-4388-b5c0-3debc0789e8e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" Nov 21 16:01:24 crc kubenswrapper[4967]: I1121 16:01:24.047090 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc803d7b-a544-4388-b5c0-3debc0789e8e-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r\" (UID: \"cc803d7b-a544-4388-b5c0-3debc0789e8e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" Nov 21 16:01:24 crc kubenswrapper[4967]: I1121 16:01:24.149043 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc803d7b-a544-4388-b5c0-3debc0789e8e-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r\" (UID: \"cc803d7b-a544-4388-b5c0-3debc0789e8e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" Nov 21 16:01:24 crc kubenswrapper[4967]: I1121 16:01:24.149107 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7h7vf\" (UniqueName: \"kubernetes.io/projected/cc803d7b-a544-4388-b5c0-3debc0789e8e-kube-api-access-7h7vf\") pod 
\"repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r\" (UID: \"cc803d7b-a544-4388-b5c0-3debc0789e8e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" Nov 21 16:01:24 crc kubenswrapper[4967]: I1121 16:01:24.149276 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc803d7b-a544-4388-b5c0-3debc0789e8e-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r\" (UID: \"cc803d7b-a544-4388-b5c0-3debc0789e8e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" Nov 21 16:01:24 crc kubenswrapper[4967]: I1121 16:01:24.149377 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc803d7b-a544-4388-b5c0-3debc0789e8e-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r\" (UID: \"cc803d7b-a544-4388-b5c0-3debc0789e8e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" Nov 21 16:01:24 crc kubenswrapper[4967]: I1121 16:01:24.158493 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc803d7b-a544-4388-b5c0-3debc0789e8e-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r\" (UID: \"cc803d7b-a544-4388-b5c0-3debc0789e8e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" Nov 21 16:01:24 crc kubenswrapper[4967]: I1121 16:01:24.158498 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc803d7b-a544-4388-b5c0-3debc0789e8e-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r\" (UID: \"cc803d7b-a544-4388-b5c0-3debc0789e8e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" Nov 21 16:01:24 crc kubenswrapper[4967]: I1121 16:01:24.159284 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc803d7b-a544-4388-b5c0-3debc0789e8e-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r\" (UID: \"cc803d7b-a544-4388-b5c0-3debc0789e8e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" Nov 21 16:01:24 crc kubenswrapper[4967]: I1121 16:01:24.175795 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7h7vf\" (UniqueName: \"kubernetes.io/projected/cc803d7b-a544-4388-b5c0-3debc0789e8e-kube-api-access-7h7vf\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r\" (UID: \"cc803d7b-a544-4388-b5c0-3debc0789e8e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" Nov 21 16:01:24 crc kubenswrapper[4967]: I1121 16:01:24.203176 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" Nov 21 16:01:25 crc kubenswrapper[4967]: I1121 16:01:25.167028 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r"] Nov 21 16:01:25 crc kubenswrapper[4967]: I1121 16:01:25.170177 4967 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 16:01:25 crc kubenswrapper[4967]: I1121 16:01:25.222939 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" event={"ID":"cc803d7b-a544-4388-b5c0-3debc0789e8e","Type":"ContainerStarted","Data":"2abef856989634f55bb1cca40abf80801a11b0d389e961125196437caaf1d7b7"} Nov 21 16:01:34 crc kubenswrapper[4967]: I1121 16:01:34.456526 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 21 16:01:36 crc kubenswrapper[4967]: I1121 16:01:36.163516 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 21 16:01:40 crc kubenswrapper[4967]: I1121 16:01:40.421275 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" event={"ID":"cc803d7b-a544-4388-b5c0-3debc0789e8e","Type":"ContainerStarted","Data":"db0189bb554b6710504bf42a1f6f6ca6b73b6c4e856a15a89c9d847a002e373e"} Nov 21 16:01:40 crc kubenswrapper[4967]: I1121 16:01:40.440422 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" podStartSLOduration=3.180674406 podStartE2EDuration="17.44040118s" podCreationTimestamp="2025-11-21 16:01:23 +0000 UTC" firstStartedPulling="2025-11-21 16:01:25.169996876 +0000 UTC m=+1573.428517884" lastFinishedPulling="2025-11-21 16:01:39.42972365 +0000 UTC m=+1587.688244658" observedRunningTime="2025-11-21 16:01:40.433655612 +0000 UTC m=+1588.692176630" watchObservedRunningTime="2025-11-21 16:01:40.44040118 +0000 UTC m=+1588.698922188" Nov 21 16:01:46 crc kubenswrapper[4967]: I1121 16:01:46.522279 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:01:46 crc kubenswrapper[4967]: I1121 16:01:46.523452 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:01:51 crc kubenswrapper[4967]: I1121 16:01:51.555490 4967 generic.go:334] "Generic (PLEG): container finished" podID="cc803d7b-a544-4388-b5c0-3debc0789e8e" containerID="db0189bb554b6710504bf42a1f6f6ca6b73b6c4e856a15a89c9d847a002e373e" exitCode=0 Nov 21 16:01:51 crc kubenswrapper[4967]: I1121 16:01:51.555522 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" event={"ID":"cc803d7b-a544-4388-b5c0-3debc0789e8e","Type":"ContainerDied","Data":"db0189bb554b6710504bf42a1f6f6ca6b73b6c4e856a15a89c9d847a002e373e"} Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.126617 4967 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.152714 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc803d7b-a544-4388-b5c0-3debc0789e8e-ssh-key\") pod \"cc803d7b-a544-4388-b5c0-3debc0789e8e\" (UID: \"cc803d7b-a544-4388-b5c0-3debc0789e8e\") " Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.152808 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc803d7b-a544-4388-b5c0-3debc0789e8e-repo-setup-combined-ca-bundle\") pod \"cc803d7b-a544-4388-b5c0-3debc0789e8e\" (UID: \"cc803d7b-a544-4388-b5c0-3debc0789e8e\") " Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.152989 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc803d7b-a544-4388-b5c0-3debc0789e8e-inventory\") pod \"cc803d7b-a544-4388-b5c0-3debc0789e8e\" (UID: \"cc803d7b-a544-4388-b5c0-3debc0789e8e\") " Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.153112 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7h7vf\" (UniqueName: \"kubernetes.io/projected/cc803d7b-a544-4388-b5c0-3debc0789e8e-kube-api-access-7h7vf\") pod \"cc803d7b-a544-4388-b5c0-3debc0789e8e\" (UID: \"cc803d7b-a544-4388-b5c0-3debc0789e8e\") " Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.161461 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc803d7b-a544-4388-b5c0-3debc0789e8e-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "cc803d7b-a544-4388-b5c0-3debc0789e8e" (UID: "cc803d7b-a544-4388-b5c0-3debc0789e8e"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.162697 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc803d7b-a544-4388-b5c0-3debc0789e8e-kube-api-access-7h7vf" (OuterVolumeSpecName: "kube-api-access-7h7vf") pod "cc803d7b-a544-4388-b5c0-3debc0789e8e" (UID: "cc803d7b-a544-4388-b5c0-3debc0789e8e"). InnerVolumeSpecName "kube-api-access-7h7vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.190453 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc803d7b-a544-4388-b5c0-3debc0789e8e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "cc803d7b-a544-4388-b5c0-3debc0789e8e" (UID: "cc803d7b-a544-4388-b5c0-3debc0789e8e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.199451 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc803d7b-a544-4388-b5c0-3debc0789e8e-inventory" (OuterVolumeSpecName: "inventory") pod "cc803d7b-a544-4388-b5c0-3debc0789e8e" (UID: "cc803d7b-a544-4388-b5c0-3debc0789e8e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.256749 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7h7vf\" (UniqueName: \"kubernetes.io/projected/cc803d7b-a544-4388-b5c0-3debc0789e8e-kube-api-access-7h7vf\") on node \"crc\" DevicePath \"\"" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.256821 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc803d7b-a544-4388-b5c0-3debc0789e8e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.256835 4967 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc803d7b-a544-4388-b5c0-3debc0789e8e-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.256848 4967 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc803d7b-a544-4388-b5c0-3debc0789e8e-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.580628 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" event={"ID":"cc803d7b-a544-4388-b5c0-3debc0789e8e","Type":"ContainerDied","Data":"2abef856989634f55bb1cca40abf80801a11b0d389e961125196437caaf1d7b7"} Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.581026 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2abef856989634f55bb1cca40abf80801a11b0d389e961125196437caaf1d7b7" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.580678 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.766284 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf"] Nov 21 16:01:53 crc kubenswrapper[4967]: E1121 16:01:53.767143 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc803d7b-a544-4388-b5c0-3debc0789e8e" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.767291 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc803d7b-a544-4388-b5c0-3debc0789e8e" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.767619 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc803d7b-a544-4388-b5c0-3debc0789e8e" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.768740 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.771029 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.773520 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rn5c5" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.773856 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.773854 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.778368 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf"] Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.870896 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v57v5\" (UniqueName: \"kubernetes.io/projected/4af462cc-74bb-4ef1-bece-cd54d27bb7ef-kube-api-access-v57v5\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xlrwf\" (UID: \"4af462cc-74bb-4ef1-bece-cd54d27bb7ef\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.871438 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4af462cc-74bb-4ef1-bece-cd54d27bb7ef-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xlrwf\" (UID: \"4af462cc-74bb-4ef1-bece-cd54d27bb7ef\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.871548 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4af462cc-74bb-4ef1-bece-cd54d27bb7ef-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xlrwf\" (UID: \"4af462cc-74bb-4ef1-bece-cd54d27bb7ef\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.973827 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4af462cc-74bb-4ef1-bece-cd54d27bb7ef-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xlrwf\" (UID: \"4af462cc-74bb-4ef1-bece-cd54d27bb7ef\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.973881 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4af462cc-74bb-4ef1-bece-cd54d27bb7ef-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xlrwf\" (UID: \"4af462cc-74bb-4ef1-bece-cd54d27bb7ef\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.974022 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v57v5\" (UniqueName: \"kubernetes.io/projected/4af462cc-74bb-4ef1-bece-cd54d27bb7ef-kube-api-access-v57v5\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xlrwf\" (UID: \"4af462cc-74bb-4ef1-bece-cd54d27bb7ef\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.979034 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4af462cc-74bb-4ef1-bece-cd54d27bb7ef-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xlrwf\" (UID: \"4af462cc-74bb-4ef1-bece-cd54d27bb7ef\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.979388 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4af462cc-74bb-4ef1-bece-cd54d27bb7ef-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xlrwf\" (UID: \"4af462cc-74bb-4ef1-bece-cd54d27bb7ef\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf" Nov 21 16:01:53 crc kubenswrapper[4967]: I1121 16:01:53.993189 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v57v5\" (UniqueName: \"kubernetes.io/projected/4af462cc-74bb-4ef1-bece-cd54d27bb7ef-kube-api-access-v57v5\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-xlrwf\" (UID: \"4af462cc-74bb-4ef1-bece-cd54d27bb7ef\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf" Nov 21 16:01:54 crc kubenswrapper[4967]: I1121 16:01:54.088647 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf" Nov 21 16:01:54 crc kubenswrapper[4967]: I1121 16:01:54.619599 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf"] Nov 21 16:01:55 crc kubenswrapper[4967]: I1121 16:01:55.604491 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf" event={"ID":"4af462cc-74bb-4ef1-bece-cd54d27bb7ef","Type":"ContainerStarted","Data":"afb7d5a6fdb18bd98043f0997061e84e9af4c1cd623e1a2272b90e6f57f4f6c3"} Nov 21 16:01:56 crc kubenswrapper[4967]: I1121 16:01:56.616328 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf" event={"ID":"4af462cc-74bb-4ef1-bece-cd54d27bb7ef","Type":"ContainerStarted","Data":"ae7f45b116947d118bbfa1fd25ec48c1a80c5018e0f44ba26ebeda85f7a0d0a6"} Nov 21 16:01:56 crc kubenswrapper[4967]: I1121 16:01:56.642259 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf" podStartSLOduration=2.141014523 podStartE2EDuration="3.642240493s" podCreationTimestamp="2025-11-21 16:01:53 +0000 UTC" firstStartedPulling="2025-11-21 16:01:54.640225663 +0000 UTC m=+1602.898746671" lastFinishedPulling="2025-11-21 16:01:56.141451623 +0000 UTC m=+1604.399972641" observedRunningTime="2025-11-21 16:01:56.634616839 +0000 UTC m=+1604.893137857" watchObservedRunningTime="2025-11-21 16:01:56.642240493 +0000 UTC m=+1604.900761501" Nov 21 16:02:00 crc kubenswrapper[4967]: I1121 16:02:00.676683 4967 generic.go:334] "Generic (PLEG): container finished" podID="4af462cc-74bb-4ef1-bece-cd54d27bb7ef" containerID="ae7f45b116947d118bbfa1fd25ec48c1a80c5018e0f44ba26ebeda85f7a0d0a6" exitCode=0 Nov 21 16:02:00 crc kubenswrapper[4967]: I1121 16:02:00.676823 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf" 
event={"ID":"4af462cc-74bb-4ef1-bece-cd54d27bb7ef","Type":"ContainerDied","Data":"ae7f45b116947d118bbfa1fd25ec48c1a80c5018e0f44ba26ebeda85f7a0d0a6"} Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.153654 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.341761 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4af462cc-74bb-4ef1-bece-cd54d27bb7ef-ssh-key\") pod \"4af462cc-74bb-4ef1-bece-cd54d27bb7ef\" (UID: \"4af462cc-74bb-4ef1-bece-cd54d27bb7ef\") " Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.342004 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4af462cc-74bb-4ef1-bece-cd54d27bb7ef-inventory\") pod \"4af462cc-74bb-4ef1-bece-cd54d27bb7ef\" (UID: \"4af462cc-74bb-4ef1-bece-cd54d27bb7ef\") " Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.342112 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v57v5\" (UniqueName: \"kubernetes.io/projected/4af462cc-74bb-4ef1-bece-cd54d27bb7ef-kube-api-access-v57v5\") pod \"4af462cc-74bb-4ef1-bece-cd54d27bb7ef\" (UID: \"4af462cc-74bb-4ef1-bece-cd54d27bb7ef\") " Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.351016 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4af462cc-74bb-4ef1-bece-cd54d27bb7ef-kube-api-access-v57v5" (OuterVolumeSpecName: "kube-api-access-v57v5") pod "4af462cc-74bb-4ef1-bece-cd54d27bb7ef" (UID: "4af462cc-74bb-4ef1-bece-cd54d27bb7ef"). InnerVolumeSpecName "kube-api-access-v57v5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.379810 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4af462cc-74bb-4ef1-bece-cd54d27bb7ef-inventory" (OuterVolumeSpecName: "inventory") pod "4af462cc-74bb-4ef1-bece-cd54d27bb7ef" (UID: "4af462cc-74bb-4ef1-bece-cd54d27bb7ef"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.380300 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4af462cc-74bb-4ef1-bece-cd54d27bb7ef-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4af462cc-74bb-4ef1-bece-cd54d27bb7ef" (UID: "4af462cc-74bb-4ef1-bece-cd54d27bb7ef"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.445934 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4af462cc-74bb-4ef1-bece-cd54d27bb7ef-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.445978 4967 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4af462cc-74bb-4ef1-bece-cd54d27bb7ef-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.445999 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v57v5\" (UniqueName: \"kubernetes.io/projected/4af462cc-74bb-4ef1-bece-cd54d27bb7ef-kube-api-access-v57v5\") on node \"crc\" DevicePath \"\"" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.701699 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf" event={"ID":"4af462cc-74bb-4ef1-bece-cd54d27bb7ef","Type":"ContainerDied","Data":"afb7d5a6fdb18bd98043f0997061e84e9af4c1cd623e1a2272b90e6f57f4f6c3"} Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.701736 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="afb7d5a6fdb18bd98043f0997061e84e9af4c1cd623e1a2272b90e6f57f4f6c3" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.701742 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-xlrwf" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.848863 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw"] Nov 21 16:02:02 crc kubenswrapper[4967]: E1121 16:02:02.849836 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4af462cc-74bb-4ef1-bece-cd54d27bb7ef" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.849857 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="4af462cc-74bb-4ef1-bece-cd54d27bb7ef" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.850200 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="4af462cc-74bb-4ef1-bece-cd54d27bb7ef" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.851392 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.853483 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rn5c5" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.854882 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.854990 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.855086 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.856412 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw\" (UID: \"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.856499 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9p7g\" (UniqueName: \"kubernetes.io/projected/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-kube-api-access-r9p7g\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw\" (UID: \"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.856614 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw\" (UID: \"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.856680 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw\" (UID: \"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.865868 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw"] Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.959101 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw\" (UID: \"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.959188 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw\" (UID: 
\"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.959252 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw\" (UID: \"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.959345 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9p7g\" (UniqueName: \"kubernetes.io/projected/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-kube-api-access-r9p7g\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw\" (UID: \"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.963456 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw\" (UID: \"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.963781 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw\" (UID: \"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.964284 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw\" (UID: \"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" Nov 21 16:02:02 crc kubenswrapper[4967]: I1121 16:02:02.975807 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9p7g\" (UniqueName: \"kubernetes.io/projected/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-kube-api-access-r9p7g\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw\" (UID: \"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" Nov 21 16:02:03 crc kubenswrapper[4967]: I1121 16:02:03.174110 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" Nov 21 16:02:03 crc kubenswrapper[4967]: I1121 16:02:03.734374 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw"] Nov 21 16:02:04 crc kubenswrapper[4967]: I1121 16:02:04.732023 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" event={"ID":"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2","Type":"ContainerStarted","Data":"ccd39e861facca5e6638718f8932c9aafd11fb53a33e820c7643c93001fce729"} Nov 21 16:02:04 crc kubenswrapper[4967]: I1121 16:02:04.732501 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" event={"ID":"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2","Type":"ContainerStarted","Data":"9c88f48c83f81a679548face84965316edaf08ac52b6258d962f21d97298097e"} Nov 21 16:02:04 crc kubenswrapper[4967]: I1121 16:02:04.755216 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" podStartSLOduration=2.334485844 podStartE2EDuration="2.75519871s" podCreationTimestamp="2025-11-21 16:02:02 +0000 UTC" firstStartedPulling="2025-11-21 16:02:03.72781637 +0000 UTC m=+1611.986337378" lastFinishedPulling="2025-11-21 16:02:04.148529236 +0000 UTC m=+1612.407050244" observedRunningTime="2025-11-21 16:02:04.750211423 +0000 UTC m=+1613.008732431" watchObservedRunningTime="2025-11-21 16:02:04.75519871 +0000 UTC m=+1613.013719718" Nov 21 16:02:16 crc kubenswrapper[4967]: I1121 16:02:16.522186 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:02:16 crc kubenswrapper[4967]: I1121 16:02:16.522771 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:02:16 crc kubenswrapper[4967]: I1121 16:02:16.523381 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 16:02:16 crc kubenswrapper[4967]: I1121 16:02:16.524300 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 16:02:16 crc kubenswrapper[4967]: I1121 16:02:16.524387 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" gracePeriod=600 Nov 21 16:02:17 crc kubenswrapper[4967]: E1121 16:02:17.235713 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:02:17 crc kubenswrapper[4967]: I1121 16:02:17.879902 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" exitCode=0 Nov 21 16:02:17 crc kubenswrapper[4967]: I1121 16:02:17.879950 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a"} Nov 21 16:02:17 crc kubenswrapper[4967]: I1121 16:02:17.880029 4967 scope.go:117] "RemoveContainer" containerID="f09ca3cd3c7764210d3de0eccc7f7854f17def33e58cd06023ecb248dfe5b054" Nov 21 16:02:17 crc kubenswrapper[4967]: I1121 16:02:17.880960 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:02:17 crc kubenswrapper[4967]: E1121 16:02:17.881334 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:02:19 crc kubenswrapper[4967]: I1121 16:02:19.657907 4967 scope.go:117] "RemoveContainer" containerID="5b9bca81bb29bbba4965712b67772e9516b0de09b6b7075258575b7c8f627668" Nov 21 16:02:19 crc kubenswrapper[4967]: I1121 16:02:19.729802 4967 scope.go:117] "RemoveContainer" containerID="b90e64f0a84c9b72beaa4635b77929bc0f532a39d96eb6cb1d1d2d82efa5f075" Nov 21 16:02:19 crc kubenswrapper[4967]: I1121 16:02:19.863297 4967 scope.go:117] "RemoveContainer" containerID="874e5b6d9db34d44f43a3bd2057069b1e10198bf5cf524250d237bf0ce9ce8d7" Nov 21 16:02:30 crc kubenswrapper[4967]: I1121 16:02:30.537496 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:02:30 crc kubenswrapper[4967]: E1121 16:02:30.538399 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:02:43 crc kubenswrapper[4967]: I1121 16:02:43.537351 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:02:43 crc kubenswrapper[4967]: E1121 16:02:43.538836 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:02:54 crc kubenswrapper[4967]: I1121 16:02:54.536478 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:02:54 crc kubenswrapper[4967]: E1121 16:02:54.537373 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:03:06 crc kubenswrapper[4967]: I1121 16:03:06.536525 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:03:06 crc kubenswrapper[4967]: E1121 16:03:06.537731 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:03:17 crc kubenswrapper[4967]: I1121 16:03:17.537387 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:03:17 crc kubenswrapper[4967]: E1121 16:03:17.538912 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:03:19 crc kubenswrapper[4967]: I1121 16:03:19.943597 4967 scope.go:117] "RemoveContainer" containerID="b70af11e3e863da51b383110c505b812605a8d1ed939cad3719295564dd19018" Nov 21 16:03:19 crc kubenswrapper[4967]: I1121 16:03:19.983046 4967 scope.go:117] "RemoveContainer" containerID="020bef81047c8008782c5042de065aabb0805f186829f78f37a67754b692a572" Nov 21 16:03:20 crc kubenswrapper[4967]: I1121 16:03:20.003996 4967 scope.go:117] "RemoveContainer" containerID="7be10394e739b35ce1e663f218abef8e4a2ddd5a257751c4cd52cce365cd4b2b" Nov 21 16:03:20 crc kubenswrapper[4967]: I1121 16:03:20.033162 4967 scope.go:117] "RemoveContainer" containerID="6e7f14a0cd1f82e6eb7173240eba239b9bcd3aea7c98e320171fbe2c72645fe8" Nov 21 16:03:20 crc kubenswrapper[4967]: I1121 16:03:20.055490 4967 scope.go:117] "RemoveContainer" containerID="5ff7134e6aee4fbea9effdf90993ee33422c1178643d8badd53a1301364d5b9b" Nov 21 16:03:20 crc kubenswrapper[4967]: I1121 16:03:20.081568 4967 scope.go:117] "RemoveContainer" containerID="48812ecedfc0e84bf063bb60b551c2d4eb966005a62cd2248b1b1a54305a527d" Nov 21 16:03:20 crc kubenswrapper[4967]: I1121 16:03:20.106326 4967 scope.go:117] "RemoveContainer" containerID="42cda58e8125226d31e745936b86915184a64bc5b49a6e9322d2439fcfc09688" Nov 21 16:03:20 crc kubenswrapper[4967]: I1121 16:03:20.130534 4967 scope.go:117] "RemoveContainer" 
containerID="f025580bf98c93b57599b9e747243fb87b1d585e13ad663372503a7e72922eaf" Nov 21 16:03:30 crc kubenswrapper[4967]: I1121 16:03:30.537431 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:03:30 crc kubenswrapper[4967]: E1121 16:03:30.539246 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:03:43 crc kubenswrapper[4967]: I1121 16:03:43.536825 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:03:43 crc kubenswrapper[4967]: E1121 16:03:43.537578 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:03:55 crc kubenswrapper[4967]: I1121 16:03:55.537558 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:03:55 crc kubenswrapper[4967]: E1121 16:03:55.538368 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:04:08 crc kubenswrapper[4967]: I1121 16:04:08.536197 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:04:08 crc kubenswrapper[4967]: E1121 16:04:08.536920 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:04:20 crc kubenswrapper[4967]: I1121 16:04:20.248919 4967 scope.go:117] "RemoveContainer" containerID="f7a1bd11183d346ad6e79bc7a8d754b69b6b041030f59cf7215bcd7f277a9bae" Nov 21 16:04:20 crc kubenswrapper[4967]: I1121 16:04:20.287426 4967 scope.go:117] "RemoveContainer" containerID="b764bcf3115b4a314b250266927c14a33925a2d1a33ced19c9871713cdb99141" Nov 21 16:04:20 crc kubenswrapper[4967]: I1121 16:04:20.319426 4967 scope.go:117] "RemoveContainer" containerID="4b912915211357dc6ae2523ac847e8c974f536d20ec92b6d83bca803bc4da52f" Nov 21 16:04:20 crc kubenswrapper[4967]: I1121 16:04:20.347546 4967 scope.go:117] "RemoveContainer" containerID="d0d46dfbccb8c7577d72d35bff9ad542f713813525634b6e84df4446e99c4865" Nov 21 16:04:22 crc kubenswrapper[4967]: I1121 16:04:22.545568 4967 
scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:04:22 crc kubenswrapper[4967]: E1121 16:04:22.546204 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:04:35 crc kubenswrapper[4967]: I1121 16:04:35.537067 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:04:35 crc kubenswrapper[4967]: E1121 16:04:35.537858 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:04:46 crc kubenswrapper[4967]: I1121 16:04:46.061370 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-81db-account-create-8qvgv"] Nov 21 16:04:46 crc kubenswrapper[4967]: I1121 16:04:46.072486 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-8fnsz"] Nov 21 16:04:46 crc kubenswrapper[4967]: I1121 16:04:46.083446 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-86c0-account-create-6xrqg"] Nov 21 16:04:46 crc kubenswrapper[4967]: I1121 16:04:46.094278 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-hdqhx"] Nov 21 16:04:46 crc kubenswrapper[4967]: I1121 16:04:46.104098 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-8fnsz"] Nov 21 16:04:46 crc kubenswrapper[4967]: I1121 16:04:46.113874 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-hdqhx"] Nov 21 16:04:46 crc kubenswrapper[4967]: I1121 16:04:46.124211 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-81db-account-create-8qvgv"] Nov 21 16:04:46 crc kubenswrapper[4967]: I1121 16:04:46.133523 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-86c0-account-create-6xrqg"] Nov 21 16:04:46 crc kubenswrapper[4967]: I1121 16:04:46.554947 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fb72003-35bf-4473-be52-303e57f5351d" path="/var/lib/kubelet/pods/6fb72003-35bf-4473-be52-303e57f5351d/volumes" Nov 21 16:04:46 crc kubenswrapper[4967]: I1121 16:04:46.558384 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="704454f3-1078-44b7-b41f-3da332e4015f" path="/var/lib/kubelet/pods/704454f3-1078-44b7-b41f-3da332e4015f/volumes" Nov 21 16:04:46 crc kubenswrapper[4967]: I1121 16:04:46.563810 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a44abc5-c95d-4bb5-ae69-1b770ee9fe56" path="/var/lib/kubelet/pods/8a44abc5-c95d-4bb5-ae69-1b770ee9fe56/volumes" Nov 21 16:04:46 crc kubenswrapper[4967]: I1121 16:04:46.568680 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e57707db-bae6-4223-968e-52d7ba80f7f2" 
path="/var/lib/kubelet/pods/e57707db-bae6-4223-968e-52d7ba80f7f2/volumes" Nov 21 16:04:47 crc kubenswrapper[4967]: I1121 16:04:47.536880 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:04:47 crc kubenswrapper[4967]: E1121 16:04:47.537192 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:04:49 crc kubenswrapper[4967]: I1121 16:04:49.037263 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-50c0-account-create-c5xnl"] Nov 21 16:04:49 crc kubenswrapper[4967]: I1121 16:04:49.049514 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-kn45m"] Nov 21 16:04:49 crc kubenswrapper[4967]: I1121 16:04:49.061279 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-kn45m"] Nov 21 16:04:49 crc kubenswrapper[4967]: I1121 16:04:49.074763 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-50c0-account-create-c5xnl"] Nov 21 16:04:50 crc kubenswrapper[4967]: I1121 16:04:50.033009 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-xpqkr"] Nov 21 16:04:50 crc kubenswrapper[4967]: I1121 16:04:50.044093 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-xpqkr"] Nov 21 16:04:50 crc kubenswrapper[4967]: I1121 16:04:50.551571 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d966c29-248f-49a3-b5c8-e88deb1aa0d9" path="/var/lib/kubelet/pods/5d966c29-248f-49a3-b5c8-e88deb1aa0d9/volumes" Nov 21 16:04:50 crc kubenswrapper[4967]: I1121 16:04:50.555290 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e949c27-e236-4411-b306-eb9e7d3385f4" path="/var/lib/kubelet/pods/6e949c27-e236-4411-b306-eb9e7d3385f4/volumes" Nov 21 16:04:50 crc kubenswrapper[4967]: I1121 16:04:50.557164 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88694819-ea7a-48ee-89fb-c9df36ca33d9" path="/var/lib/kubelet/pods/88694819-ea7a-48ee-89fb-c9df36ca33d9/volumes" Nov 21 16:04:52 crc kubenswrapper[4967]: I1121 16:04:52.028336 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-d1ef-account-create-bpt7z"] Nov 21 16:04:52 crc kubenswrapper[4967]: I1121 16:04:52.039710 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-d1ef-account-create-bpt7z"] Nov 21 16:04:52 crc kubenswrapper[4967]: I1121 16:04:52.563772 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c59946-4e7a-45d0-8cd3-b7de14d8e8f5" path="/var/lib/kubelet/pods/22c59946-4e7a-45d0-8cd3-b7de14d8e8f5/volumes" Nov 21 16:04:55 crc kubenswrapper[4967]: I1121 16:04:55.029827 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-vkb4s"] Nov 21 16:04:55 crc kubenswrapper[4967]: I1121 16:04:55.040913 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-vkb4s"] Nov 21 16:04:56 crc kubenswrapper[4967]: I1121 16:04:56.550255 4967 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c78ece6b-57df-4cf9-add7-6c8ca602d7a8" path="/var/lib/kubelet/pods/c78ece6b-57df-4cf9-add7-6c8ca602d7a8/volumes" Nov 21 16:04:57 crc kubenswrapper[4967]: I1121 16:04:57.032906 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-fa6c-account-create-x8pf7"] Nov 21 16:04:57 crc kubenswrapper[4967]: I1121 16:04:57.044561 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-fa6c-account-create-x8pf7"] Nov 21 16:04:58 crc kubenswrapper[4967]: I1121 16:04:58.559614 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7775b65a-c6fb-4eda-9006-182b889c4a0b" path="/var/lib/kubelet/pods/7775b65a-c6fb-4eda-9006-182b889c4a0b/volumes" Nov 21 16:05:00 crc kubenswrapper[4967]: I1121 16:05:00.537978 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:05:00 crc kubenswrapper[4967]: E1121 16:05:00.538940 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:05:09 crc kubenswrapper[4967]: I1121 16:05:09.799453 4967 generic.go:334] "Generic (PLEG): container finished" podID="b43aaf4b-3291-4e5a-b01d-ee1365c62ab2" containerID="ccd39e861facca5e6638718f8932c9aafd11fb53a33e820c7643c93001fce729" exitCode=0 Nov 21 16:05:09 crc kubenswrapper[4967]: I1121 16:05:09.799541 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" event={"ID":"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2","Type":"ContainerDied","Data":"ccd39e861facca5e6638718f8932c9aafd11fb53a33e820c7643c93001fce729"} Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.297661 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.343847 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-ssh-key\") pod \"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\" (UID: \"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\") " Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.343974 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-inventory\") pod \"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\" (UID: \"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\") " Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.344177 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r9p7g\" (UniqueName: \"kubernetes.io/projected/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-kube-api-access-r9p7g\") pod \"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\" (UID: \"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\") " Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.344269 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-bootstrap-combined-ca-bundle\") pod \"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\" (UID: \"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2\") " Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.350202 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "b43aaf4b-3291-4e5a-b01d-ee1365c62ab2" (UID: "b43aaf4b-3291-4e5a-b01d-ee1365c62ab2"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.350637 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-kube-api-access-r9p7g" (OuterVolumeSpecName: "kube-api-access-r9p7g") pod "b43aaf4b-3291-4e5a-b01d-ee1365c62ab2" (UID: "b43aaf4b-3291-4e5a-b01d-ee1365c62ab2"). InnerVolumeSpecName "kube-api-access-r9p7g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.381599 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b43aaf4b-3291-4e5a-b01d-ee1365c62ab2" (UID: "b43aaf4b-3291-4e5a-b01d-ee1365c62ab2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.388901 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-inventory" (OuterVolumeSpecName: "inventory") pod "b43aaf4b-3291-4e5a-b01d-ee1365c62ab2" (UID: "b43aaf4b-3291-4e5a-b01d-ee1365c62ab2"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.447640 4967 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.447678 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.447687 4967 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.447697 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r9p7g\" (UniqueName: \"kubernetes.io/projected/b43aaf4b-3291-4e5a-b01d-ee1365c62ab2-kube-api-access-r9p7g\") on node \"crc\" DevicePath \"\"" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.821177 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" event={"ID":"b43aaf4b-3291-4e5a-b01d-ee1365c62ab2","Type":"ContainerDied","Data":"9c88f48c83f81a679548face84965316edaf08ac52b6258d962f21d97298097e"} Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.821217 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c88f48c83f81a679548face84965316edaf08ac52b6258d962f21d97298097e" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.821260 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.913041 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw"] Nov 21 16:05:11 crc kubenswrapper[4967]: E1121 16:05:11.913696 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b43aaf4b-3291-4e5a-b01d-ee1365c62ab2" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.913721 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b43aaf4b-3291-4e5a-b01d-ee1365c62ab2" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.914059 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="b43aaf4b-3291-4e5a-b01d-ee1365c62ab2" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.914910 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.920858 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rn5c5" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.924165 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.924410 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.925845 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.932514 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw"] Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.960082 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mtngm\" (UniqueName: \"kubernetes.io/projected/66c72c31-e791-478b-bbfc-3ba795c580e9-kube-api-access-mtngm\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw\" (UID: \"66c72c31-e791-478b-bbfc-3ba795c580e9\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.960171 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66c72c31-e791-478b-bbfc-3ba795c580e9-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw\" (UID: \"66c72c31-e791-478b-bbfc-3ba795c580e9\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw" Nov 21 16:05:11 crc kubenswrapper[4967]: I1121 16:05:11.960249 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66c72c31-e791-478b-bbfc-3ba795c580e9-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw\" (UID: \"66c72c31-e791-478b-bbfc-3ba795c580e9\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw" Nov 21 16:05:12 crc kubenswrapper[4967]: I1121 16:05:12.062553 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66c72c31-e791-478b-bbfc-3ba795c580e9-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw\" (UID: \"66c72c31-e791-478b-bbfc-3ba795c580e9\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw" Nov 21 16:05:12 crc kubenswrapper[4967]: I1121 16:05:12.062665 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66c72c31-e791-478b-bbfc-3ba795c580e9-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw\" (UID: \"66c72c31-e791-478b-bbfc-3ba795c580e9\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw" Nov 21 16:05:12 crc kubenswrapper[4967]: I1121 16:05:12.062895 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mtngm\" (UniqueName: \"kubernetes.io/projected/66c72c31-e791-478b-bbfc-3ba795c580e9-kube-api-access-mtngm\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw\" (UID: \"66c72c31-e791-478b-bbfc-3ba795c580e9\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw" Nov 21 16:05:12 crc kubenswrapper[4967]: I1121 16:05:12.069144 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66c72c31-e791-478b-bbfc-3ba795c580e9-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw\" (UID: \"66c72c31-e791-478b-bbfc-3ba795c580e9\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw" Nov 21 16:05:12 crc kubenswrapper[4967]: I1121 16:05:12.069686 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66c72c31-e791-478b-bbfc-3ba795c580e9-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw\" (UID: \"66c72c31-e791-478b-bbfc-3ba795c580e9\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw" Nov 21 16:05:12 crc kubenswrapper[4967]: I1121 16:05:12.079414 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mtngm\" (UniqueName: \"kubernetes.io/projected/66c72c31-e791-478b-bbfc-3ba795c580e9-kube-api-access-mtngm\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw\" (UID: \"66c72c31-e791-478b-bbfc-3ba795c580e9\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw" Nov 21 16:05:12 crc kubenswrapper[4967]: I1121 16:05:12.242017 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw" Nov 21 16:05:12 crc kubenswrapper[4967]: I1121 16:05:12.765331 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw"] Nov 21 16:05:12 crc kubenswrapper[4967]: I1121 16:05:12.836298 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw" event={"ID":"66c72c31-e791-478b-bbfc-3ba795c580e9","Type":"ContainerStarted","Data":"46b57ed09d91e603c1836d569ba0e78cba74057c9399086e09fa50e1fcc9b648"} Nov 21 16:05:13 crc kubenswrapper[4967]: I1121 16:05:13.399341 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 16:05:13 crc kubenswrapper[4967]: I1121 16:05:13.848802 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw" event={"ID":"66c72c31-e791-478b-bbfc-3ba795c580e9","Type":"ContainerStarted","Data":"41a2b29b6f66f453debd482de467a7113dcc3f883700762578a5f22873ee583e"} Nov 21 16:05:13 crc kubenswrapper[4967]: I1121 16:05:13.871872 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw" podStartSLOduration=2.242904495 podStartE2EDuration="2.871858433s" podCreationTimestamp="2025-11-21 16:05:11 +0000 UTC" firstStartedPulling="2025-11-21 16:05:12.76701177 +0000 UTC m=+1801.025532778" lastFinishedPulling="2025-11-21 16:05:13.395965708 +0000 UTC m=+1801.654486716" observedRunningTime="2025-11-21 16:05:13.861792104 +0000 UTC m=+1802.120313112" watchObservedRunningTime="2025-11-21 16:05:13.871858433 +0000 UTC m=+1802.130379441" Nov 21 16:05:14 crc kubenswrapper[4967]: I1121 16:05:14.040352 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-twljx"] Nov 
21 16:05:14 crc kubenswrapper[4967]: I1121 16:05:14.054386 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-fca9-account-create-2qmtv"] Nov 21 16:05:14 crc kubenswrapper[4967]: I1121 16:05:14.067402 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-1a58-account-create-j6kfx"] Nov 21 16:05:14 crc kubenswrapper[4967]: I1121 16:05:14.080553 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-twljx"] Nov 21 16:05:14 crc kubenswrapper[4967]: I1121 16:05:14.090749 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-fca9-account-create-2qmtv"] Nov 21 16:05:14 crc kubenswrapper[4967]: I1121 16:05:14.102801 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-1a58-account-create-j6kfx"] Nov 21 16:05:14 crc kubenswrapper[4967]: I1121 16:05:14.554915 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3813cbde-1076-466c-b72a-94ffa3741ef1" path="/var/lib/kubelet/pods/3813cbde-1076-466c-b72a-94ffa3741ef1/volumes" Nov 21 16:05:14 crc kubenswrapper[4967]: I1121 16:05:14.560998 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62996c4f-2e2b-4e94-b60d-1c2962cd7e3f" path="/var/lib/kubelet/pods/62996c4f-2e2b-4e94-b60d-1c2962cd7e3f/volumes" Nov 21 16:05:14 crc kubenswrapper[4967]: I1121 16:05:14.566655 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c5930c6-1d89-4ef4-bd96-f290177d2aff" path="/var/lib/kubelet/pods/9c5930c6-1d89-4ef4-bd96-f290177d2aff/volumes" Nov 21 16:05:15 crc kubenswrapper[4967]: I1121 16:05:15.536464 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:05:15 crc kubenswrapper[4967]: E1121 16:05:15.536729 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:05:18 crc kubenswrapper[4967]: I1121 16:05:18.062925 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-d7rdb"] Nov 21 16:05:18 crc kubenswrapper[4967]: I1121 16:05:18.079204 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-msr6l"] Nov 21 16:05:18 crc kubenswrapper[4967]: I1121 16:05:18.091686 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-r44s6"] Nov 21 16:05:18 crc kubenswrapper[4967]: I1121 16:05:18.101707 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-d7rdb"] Nov 21 16:05:18 crc kubenswrapper[4967]: I1121 16:05:18.114618 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-e255-account-create-t44dj"] Nov 21 16:05:18 crc kubenswrapper[4967]: I1121 16:05:18.128050 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-r44s6"] Nov 21 16:05:18 crc kubenswrapper[4967]: I1121 16:05:18.139331 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-e255-account-create-t44dj"] Nov 21 16:05:18 crc kubenswrapper[4967]: I1121 16:05:18.158629 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-msr6l"] Nov 21 16:05:18 
crc kubenswrapper[4967]: I1121 16:05:18.167951 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5922-account-create-sq5cw"] Nov 21 16:05:18 crc kubenswrapper[4967]: I1121 16:05:18.177380 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5922-account-create-sq5cw"] Nov 21 16:05:18 crc kubenswrapper[4967]: I1121 16:05:18.554432 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1" path="/var/lib/kubelet/pods/5c5c8d5c-82f8-4b8c-91fe-3d2065ff8ab1/volumes" Nov 21 16:05:18 crc kubenswrapper[4967]: I1121 16:05:18.558855 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9505ac95-12b0-426a-a2b5-42f13ec2fad8" path="/var/lib/kubelet/pods/9505ac95-12b0-426a-a2b5-42f13ec2fad8/volumes" Nov 21 16:05:18 crc kubenswrapper[4967]: I1121 16:05:18.566585 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a098c7dd-1c69-4c44-a9cb-65c81b00e9e1" path="/var/lib/kubelet/pods/a098c7dd-1c69-4c44-a9cb-65c81b00e9e1/volumes" Nov 21 16:05:18 crc kubenswrapper[4967]: I1121 16:05:18.572570 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab58e136-893e-4b70-a0a3-d259b234dfcc" path="/var/lib/kubelet/pods/ab58e136-893e-4b70-a0a3-d259b234dfcc/volumes" Nov 21 16:05:18 crc kubenswrapper[4967]: I1121 16:05:18.578522 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6e85c34-bd76-47aa-b120-3410545d53f0" path="/var/lib/kubelet/pods/b6e85c34-bd76-47aa-b120-3410545d53f0/volumes" Nov 21 16:05:20 crc kubenswrapper[4967]: I1121 16:05:20.408484 4967 scope.go:117] "RemoveContainer" containerID="d56e59317bc94138f845ca502ab20844e78a940e21624aed790ce50c0adbfaea" Nov 21 16:05:20 crc kubenswrapper[4967]: I1121 16:05:20.455786 4967 scope.go:117] "RemoveContainer" containerID="15bd5691dc70002b55da786e44cc07269318059766d9c6d6f2d7812d3358a6b8" Nov 21 16:05:20 crc kubenswrapper[4967]: I1121 16:05:20.509440 4967 scope.go:117] "RemoveContainer" containerID="08d586ead6747c4d5608371fd684665888e0b4eb12b61b2ed59f0a170ff1f5a7" Nov 21 16:05:20 crc kubenswrapper[4967]: I1121 16:05:20.528783 4967 scope.go:117] "RemoveContainer" containerID="36df21bc6423db1db74e4b92fc50f81ee7f70405d8d51b384f53ebefe73dbd1d" Nov 21 16:05:20 crc kubenswrapper[4967]: I1121 16:05:20.557369 4967 scope.go:117] "RemoveContainer" containerID="f215ef7306694e91437ae9a98b304b83b795e4350243b850ddab657b4ea48bf3" Nov 21 16:05:20 crc kubenswrapper[4967]: I1121 16:05:20.604207 4967 scope.go:117] "RemoveContainer" containerID="760965ddb3cf68f6983ffb2be89964db76717533fbde37691c818aae2f99164a" Nov 21 16:05:20 crc kubenswrapper[4967]: I1121 16:05:20.630293 4967 scope.go:117] "RemoveContainer" containerID="eacd3d8499a4eb3cd0475fa343a858525c67ec738545e353f5d4a3146f204012" Nov 21 16:05:20 crc kubenswrapper[4967]: I1121 16:05:20.676334 4967 scope.go:117] "RemoveContainer" containerID="842577b1cfc0a47d3fdb8501eeae12483fbad67daf8abbf64d8ae71dd04a257f" Nov 21 16:05:20 crc kubenswrapper[4967]: I1121 16:05:20.728350 4967 scope.go:117] "RemoveContainer" containerID="132ddaec39109b192e83e49b225bd554f8ce105e338909116db1e2b95912817f" Nov 21 16:05:20 crc kubenswrapper[4967]: I1121 16:05:20.787559 4967 scope.go:117] "RemoveContainer" containerID="7c8b315206d8e0d39c4645f2297599d8cf0bfe476fe6438ee0755944f214c5bc" Nov 21 16:05:20 crc kubenswrapper[4967]: I1121 16:05:20.815032 4967 scope.go:117] "RemoveContainer" containerID="30cc6efdc6c30bd7d5908feee24e8269ea60a7591593733bcf2bae84e41ad947" Nov 21 
16:05:20 crc kubenswrapper[4967]: I1121 16:05:20.854360 4967 scope.go:117] "RemoveContainer" containerID="5c15e2cf6c73812b7e054641ec4ffd9378713c9efb432d4ae6d95abfeb7e484b" Nov 21 16:05:20 crc kubenswrapper[4967]: I1121 16:05:20.881424 4967 scope.go:117] "RemoveContainer" containerID="c556a57c9036f81a8c74981a72d6a4aeccd59b9db913adb2455b759cf8afc989" Nov 21 16:05:20 crc kubenswrapper[4967]: I1121 16:05:20.909885 4967 scope.go:117] "RemoveContainer" containerID="e38ed347585b722223ea42a57aaa3839160fa5237e37d6904b839b60f7b970a0" Nov 21 16:05:20 crc kubenswrapper[4967]: I1121 16:05:20.936099 4967 scope.go:117] "RemoveContainer" containerID="05e9a90e42b8160efd190cfe43fc2add63e216db73cb1ba55743cac7188cb0f4" Nov 21 16:05:20 crc kubenswrapper[4967]: I1121 16:05:20.959943 4967 scope.go:117] "RemoveContainer" containerID="ea9e83c624b7de77e3be2211d980926aba17815ae39534984390b145bae746a4" Nov 21 16:05:20 crc kubenswrapper[4967]: I1121 16:05:20.990567 4967 scope.go:117] "RemoveContainer" containerID="52ec31461f5f536998db68b1facaafdaf884171e3209f16130e681ab88af74ab" Nov 21 16:05:21 crc kubenswrapper[4967]: I1121 16:05:21.023583 4967 scope.go:117] "RemoveContainer" containerID="6a6e1e3b41c90eec8fb9782c755a74d169a44531655e0544b2ae43dd80c5edfb" Nov 21 16:05:21 crc kubenswrapper[4967]: I1121 16:05:21.058809 4967 scope.go:117] "RemoveContainer" containerID="1abeadf52cc378302c89526798d9d483a176622c177aafef726e98e975471f59" Nov 21 16:05:21 crc kubenswrapper[4967]: I1121 16:05:21.081712 4967 scope.go:117] "RemoveContainer" containerID="cec0cd7e9016c4fcc4212cc41c4e9cdfcbad4f5c2b211832297965a7dfff8952" Nov 21 16:05:21 crc kubenswrapper[4967]: I1121 16:05:21.104726 4967 scope.go:117] "RemoveContainer" containerID="94563cc8df986eb98b7cbee5998d256323f19ea02b9887a26782a03fef987cd2" Nov 21 16:05:21 crc kubenswrapper[4967]: I1121 16:05:21.128186 4967 scope.go:117] "RemoveContainer" containerID="079bad52f69af24d18537da76e6ce7e98d45bdd5e8cbaced24e2ed725adf5a42" Nov 21 16:05:21 crc kubenswrapper[4967]: I1121 16:05:21.149266 4967 scope.go:117] "RemoveContainer" containerID="10f259730fb38a71b2cdccd1ef36b24320560377be351357fa3351e6f5db57f9" Nov 21 16:05:21 crc kubenswrapper[4967]: I1121 16:05:21.178266 4967 scope.go:117] "RemoveContainer" containerID="09b473eb28b1f48ed986c45f476789a9586fef1ac7c6912423b32319148c0652" Nov 21 16:05:25 crc kubenswrapper[4967]: I1121 16:05:25.028591 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-gkkl8"] Nov 21 16:05:25 crc kubenswrapper[4967]: I1121 16:05:25.038208 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-gkkl8"] Nov 21 16:05:26 crc kubenswrapper[4967]: I1121 16:05:26.554300 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0dab33d9-b2f0-4884-97ba-047b7772da9a" path="/var/lib/kubelet/pods/0dab33d9-b2f0-4884-97ba-047b7772da9a/volumes" Nov 21 16:05:27 crc kubenswrapper[4967]: I1121 16:05:27.061485 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-9fbft"] Nov 21 16:05:27 crc kubenswrapper[4967]: I1121 16:05:27.072557 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-9fbft"] Nov 21 16:05:28 crc kubenswrapper[4967]: I1121 16:05:28.536867 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:05:28 crc kubenswrapper[4967]: E1121 16:05:28.537183 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:05:28 crc kubenswrapper[4967]: I1121 16:05:28.551536 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc07e264-27b3-4f82-b96e-04ef32de4c2c" path="/var/lib/kubelet/pods/fc07e264-27b3-4f82-b96e-04ef32de4c2c/volumes" Nov 21 16:05:41 crc kubenswrapper[4967]: I1121 16:05:41.536526 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:05:41 crc kubenswrapper[4967]: E1121 16:05:41.537401 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:05:56 crc kubenswrapper[4967]: I1121 16:05:56.545028 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:05:56 crc kubenswrapper[4967]: E1121 16:05:56.546578 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:06:10 crc kubenswrapper[4967]: I1121 16:06:10.536437 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:06:10 crc kubenswrapper[4967]: E1121 16:06:10.537166 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:06:15 crc kubenswrapper[4967]: I1121 16:06:15.348096 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-794fb7d789-mkxk2" podUID="9488c46d-11de-4819-9784-e32e3893a5d9" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Nov 21 16:06:21 crc kubenswrapper[4967]: I1121 16:06:21.584097 4967 scope.go:117] "RemoveContainer" containerID="089119f5d74217fbf2b924d4b97a4f23c8fb5cd7d1e20ba685fa6d34e87cadc3" Nov 21 16:06:21 crc kubenswrapper[4967]: I1121 16:06:21.630352 4967 scope.go:117] "RemoveContainer" containerID="2d2364867360eb071358a2df33122fcfac9db14bb17e08adcd21243d636a2259" Nov 21 16:06:25 crc kubenswrapper[4967]: I1121 16:06:25.048068 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-z4nb7"] Nov 21 16:06:25 crc kubenswrapper[4967]: I1121 16:06:25.062391 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/keystone-bootstrap-z4nb7"] Nov 21 16:06:25 crc kubenswrapper[4967]: I1121 16:06:25.536526 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:06:25 crc kubenswrapper[4967]: E1121 16:06:25.536967 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:06:26 crc kubenswrapper[4967]: I1121 16:06:26.550388 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0acc480b-ec94-4ce1-af6f-d20f9e5f45c2" path="/var/lib/kubelet/pods/0acc480b-ec94-4ce1-af6f-d20f9e5f45c2/volumes" Nov 21 16:06:29 crc kubenswrapper[4967]: I1121 16:06:29.035707 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-zb9np"] Nov 21 16:06:29 crc kubenswrapper[4967]: I1121 16:06:29.045812 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-zb9np"] Nov 21 16:06:30 crc kubenswrapper[4967]: I1121 16:06:30.550097 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d0b548b-65d2-496b-a8f0-5556b5e9760e" path="/var/lib/kubelet/pods/6d0b548b-65d2-496b-a8f0-5556b5e9760e/volumes" Nov 21 16:06:36 crc kubenswrapper[4967]: I1121 16:06:36.536303 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:06:36 crc kubenswrapper[4967]: E1121 16:06:36.537280 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:06:38 crc kubenswrapper[4967]: I1121 16:06:38.038929 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-r85k5"] Nov 21 16:06:38 crc kubenswrapper[4967]: I1121 16:06:38.050357 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-r85k5"] Nov 21 16:06:38 crc kubenswrapper[4967]: I1121 16:06:38.561799 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd1dc42f-c657-4dd3-9ca3-e8bc865d6280" path="/var/lib/kubelet/pods/dd1dc42f-c657-4dd3-9ca3-e8bc865d6280/volumes" Nov 21 16:06:41 crc kubenswrapper[4967]: I1121 16:06:41.034803 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-96plk"] Nov 21 16:06:41 crc kubenswrapper[4967]: I1121 16:06:41.049244 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-96plk"] Nov 21 16:06:42 crc kubenswrapper[4967]: I1121 16:06:42.028559 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-pjq5x"] Nov 21 16:06:42 crc kubenswrapper[4967]: I1121 16:06:42.039417 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-pjq5x"] Nov 21 16:06:42 crc kubenswrapper[4967]: I1121 16:06:42.585272 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad" path="/var/lib/kubelet/pods/71cb393a-d56c-4ddc-8bb6-8b7ea26ef9ad/volumes" Nov 21 16:06:42 crc kubenswrapper[4967]: I1121 16:06:42.588021 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0fc2724-5c56-4db8-9a1e-4662761791c3" path="/var/lib/kubelet/pods/e0fc2724-5c56-4db8-9a1e-4662761791c3/volumes" Nov 21 16:06:50 crc kubenswrapper[4967]: I1121 16:06:50.536545 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:06:50 crc kubenswrapper[4967]: E1121 16:06:50.537355 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:06:55 crc kubenswrapper[4967]: I1121 16:06:55.031577 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-tcdk8"] Nov 21 16:06:55 crc kubenswrapper[4967]: I1121 16:06:55.042435 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-tcdk8"] Nov 21 16:06:56 crc kubenswrapper[4967]: I1121 16:06:56.550278 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db63398d-117e-4a60-b548-e1684dbef263" path="/var/lib/kubelet/pods/db63398d-117e-4a60-b548-e1684dbef263/volumes" Nov 21 16:07:02 crc kubenswrapper[4967]: I1121 16:07:02.081203 4967 generic.go:334] "Generic (PLEG): container finished" podID="66c72c31-e791-478b-bbfc-3ba795c580e9" containerID="41a2b29b6f66f453debd482de467a7113dcc3f883700762578a5f22873ee583e" exitCode=0 Nov 21 16:07:02 crc kubenswrapper[4967]: I1121 16:07:02.081346 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw" event={"ID":"66c72c31-e791-478b-bbfc-3ba795c580e9","Type":"ContainerDied","Data":"41a2b29b6f66f453debd482de467a7113dcc3f883700762578a5f22873ee583e"} Nov 21 16:07:03 crc kubenswrapper[4967]: I1121 16:07:03.643657 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw" Nov 21 16:07:03 crc kubenswrapper[4967]: I1121 16:07:03.669478 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66c72c31-e791-478b-bbfc-3ba795c580e9-ssh-key\") pod \"66c72c31-e791-478b-bbfc-3ba795c580e9\" (UID: \"66c72c31-e791-478b-bbfc-3ba795c580e9\") " Nov 21 16:07:03 crc kubenswrapper[4967]: I1121 16:07:03.669687 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66c72c31-e791-478b-bbfc-3ba795c580e9-inventory\") pod \"66c72c31-e791-478b-bbfc-3ba795c580e9\" (UID: \"66c72c31-e791-478b-bbfc-3ba795c580e9\") " Nov 21 16:07:03 crc kubenswrapper[4967]: I1121 16:07:03.669745 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mtngm\" (UniqueName: \"kubernetes.io/projected/66c72c31-e791-478b-bbfc-3ba795c580e9-kube-api-access-mtngm\") pod \"66c72c31-e791-478b-bbfc-3ba795c580e9\" (UID: \"66c72c31-e791-478b-bbfc-3ba795c580e9\") " Nov 21 16:07:03 crc kubenswrapper[4967]: I1121 16:07:03.716993 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66c72c31-e791-478b-bbfc-3ba795c580e9-kube-api-access-mtngm" (OuterVolumeSpecName: "kube-api-access-mtngm") pod "66c72c31-e791-478b-bbfc-3ba795c580e9" (UID: "66c72c31-e791-478b-bbfc-3ba795c580e9"). InnerVolumeSpecName "kube-api-access-mtngm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:07:03 crc kubenswrapper[4967]: I1121 16:07:03.730424 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66c72c31-e791-478b-bbfc-3ba795c580e9-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "66c72c31-e791-478b-bbfc-3ba795c580e9" (UID: "66c72c31-e791-478b-bbfc-3ba795c580e9"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:07:03 crc kubenswrapper[4967]: I1121 16:07:03.738487 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66c72c31-e791-478b-bbfc-3ba795c580e9-inventory" (OuterVolumeSpecName: "inventory") pod "66c72c31-e791-478b-bbfc-3ba795c580e9" (UID: "66c72c31-e791-478b-bbfc-3ba795c580e9"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:07:03 crc kubenswrapper[4967]: I1121 16:07:03.778553 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66c72c31-e791-478b-bbfc-3ba795c580e9-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 16:07:03 crc kubenswrapper[4967]: I1121 16:07:03.778585 4967 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66c72c31-e791-478b-bbfc-3ba795c580e9-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 16:07:03 crc kubenswrapper[4967]: I1121 16:07:03.778597 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mtngm\" (UniqueName: \"kubernetes.io/projected/66c72c31-e791-478b-bbfc-3ba795c580e9-kube-api-access-mtngm\") on node \"crc\" DevicePath \"\"" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.113352 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw" event={"ID":"66c72c31-e791-478b-bbfc-3ba795c580e9","Type":"ContainerDied","Data":"46b57ed09d91e603c1836d569ba0e78cba74057c9399086e09fa50e1fcc9b648"} Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.113666 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="46b57ed09d91e603c1836d569ba0e78cba74057c9399086e09fa50e1fcc9b648" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.113384 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.186894 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r"] Nov 21 16:07:04 crc kubenswrapper[4967]: E1121 16:07:04.187434 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66c72c31-e791-478b-bbfc-3ba795c580e9" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.187456 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="66c72c31-e791-478b-bbfc-3ba795c580e9" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.187791 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="66c72c31-e791-478b-bbfc-3ba795c580e9" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.188872 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.191217 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rn5c5" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.192273 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.192287 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.193696 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.198165 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r"] Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.306967 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r\" (UID: \"ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.307025 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5cczs\" (UniqueName: \"kubernetes.io/projected/ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2-kube-api-access-5cczs\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r\" (UID: \"ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.307069 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r\" (UID: \"ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.409664 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r\" (UID: \"ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.409736 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5cczs\" (UniqueName: \"kubernetes.io/projected/ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2-kube-api-access-5cczs\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r\" (UID: \"ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.409778 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2-ssh-key\") 
pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r\" (UID: \"ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.420442 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r\" (UID: \"ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.420445 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r\" (UID: \"ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.426918 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5cczs\" (UniqueName: \"kubernetes.io/projected/ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2-kube-api-access-5cczs\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r\" (UID: \"ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.520762 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r" Nov 21 16:07:04 crc kubenswrapper[4967]: I1121 16:07:04.540528 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:07:04 crc kubenswrapper[4967]: E1121 16:07:04.540981 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:07:05 crc kubenswrapper[4967]: I1121 16:07:05.060258 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r"] Nov 21 16:07:05 crc kubenswrapper[4967]: I1121 16:07:05.066005 4967 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 16:07:05 crc kubenswrapper[4967]: I1121 16:07:05.125886 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r" event={"ID":"ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2","Type":"ContainerStarted","Data":"cc997291e5a1536aea5a945722b90ee42516c8abafa219eabf744af456af5bfd"} Nov 21 16:07:07 crc kubenswrapper[4967]: I1121 16:07:07.150722 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r" event={"ID":"ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2","Type":"ContainerStarted","Data":"43567be8e08ed59a089edf38ae8101cd225d3d28b69b14b424f7b4783f7029f2"} Nov 21 16:07:07 crc kubenswrapper[4967]: I1121 16:07:07.168873 4967 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r" podStartSLOduration=1.403582938 podStartE2EDuration="3.168850604s" podCreationTimestamp="2025-11-21 16:07:04 +0000 UTC" firstStartedPulling="2025-11-21 16:07:05.065774723 +0000 UTC m=+1913.324295731" lastFinishedPulling="2025-11-21 16:07:06.831042389 +0000 UTC m=+1915.089563397" observedRunningTime="2025-11-21 16:07:07.166412964 +0000 UTC m=+1915.424933982" watchObservedRunningTime="2025-11-21 16:07:07.168850604 +0000 UTC m=+1915.427371612" Nov 21 16:07:15 crc kubenswrapper[4967]: I1121 16:07:15.536852 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:07:15 crc kubenswrapper[4967]: E1121 16:07:15.537709 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:07:21 crc kubenswrapper[4967]: I1121 16:07:21.760751 4967 scope.go:117] "RemoveContainer" containerID="f0b7e826cff02dfc47860f3f0c7f87b7911d250ed06464577aaf8ef203efb96a" Nov 21 16:07:21 crc kubenswrapper[4967]: I1121 16:07:21.850283 4967 scope.go:117] "RemoveContainer" containerID="5a68a1617fd748c8f26d252405e183a72786bedb5ce9ad831364ec556cf7f9c8" Nov 21 16:07:21 crc kubenswrapper[4967]: I1121 16:07:21.919696 4967 scope.go:117] "RemoveContainer" containerID="dc70f55b12b5d706d65e3c4210fa1892ead921c65eba34d129c8494f817d418e" Nov 21 16:07:21 crc kubenswrapper[4967]: I1121 16:07:21.972053 4967 scope.go:117] "RemoveContainer" containerID="3ab8ec91182125356d6f72f13f5cd55bcab37827d2242e80ed41a42216834c91" Nov 21 16:07:22 crc kubenswrapper[4967]: I1121 16:07:22.030248 4967 scope.go:117] "RemoveContainer" containerID="699e112236b049b9b6e92fd13712daa433d3c0930373e844ba272abeb4c508d3" Nov 21 16:07:22 crc kubenswrapper[4967]: I1121 16:07:22.097255 4967 scope.go:117] "RemoveContainer" containerID="53109c647d135c73ad4240c502d8f1fec68a0806f504bb19180addbd8c74564d" Nov 21 16:07:23 crc kubenswrapper[4967]: I1121 16:07:23.076368 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-fdv92"] Nov 21 16:07:23 crc kubenswrapper[4967]: I1121 16:07:23.088484 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-fdv92"] Nov 21 16:07:23 crc kubenswrapper[4967]: I1121 16:07:23.099185 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-9c4e-account-create-kpr2p"] Nov 21 16:07:23 crc kubenswrapper[4967]: I1121 16:07:23.109561 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-9c4e-account-create-kpr2p"] Nov 21 16:07:24 crc kubenswrapper[4967]: I1121 16:07:24.551948 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2e282d2-51b3-46f2-9ce8-faa9ad9fec16" path="/var/lib/kubelet/pods/e2e282d2-51b3-46f2-9ce8-faa9ad9fec16/volumes" Nov 21 16:07:24 crc kubenswrapper[4967]: I1121 16:07:24.553193 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f57422a0-a226-4bd9-8dc7-ebfee76b5745" path="/var/lib/kubelet/pods/f57422a0-a226-4bd9-8dc7-ebfee76b5745/volumes" Nov 21 16:07:25 crc kubenswrapper[4967]: I1121 16:07:25.033041 4967 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openstack/nova-cell0-db-create-wrdfr"] Nov 21 16:07:25 crc kubenswrapper[4967]: I1121 16:07:25.046851 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-7b9f-account-create-ccd9f"] Nov 21 16:07:25 crc kubenswrapper[4967]: I1121 16:07:25.059602 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-2dfc-account-create-c7vxv"] Nov 21 16:07:25 crc kubenswrapper[4967]: I1121 16:07:25.072939 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-xlhnx"] Nov 21 16:07:25 crc kubenswrapper[4967]: I1121 16:07:25.085024 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-7b9f-account-create-ccd9f"] Nov 21 16:07:25 crc kubenswrapper[4967]: I1121 16:07:25.093713 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-wrdfr"] Nov 21 16:07:25 crc kubenswrapper[4967]: I1121 16:07:25.105735 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-xlhnx"] Nov 21 16:07:25 crc kubenswrapper[4967]: I1121 16:07:25.115047 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-2dfc-account-create-c7vxv"] Nov 21 16:07:26 crc kubenswrapper[4967]: I1121 16:07:26.536256 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:07:26 crc kubenswrapper[4967]: I1121 16:07:26.552217 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22f15a48-c17e-477d-90d1-ea57d28f1457" path="/var/lib/kubelet/pods/22f15a48-c17e-477d-90d1-ea57d28f1457/volumes" Nov 21 16:07:26 crc kubenswrapper[4967]: I1121 16:07:26.553955 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761" path="/var/lib/kubelet/pods/7bec8f2e-bd37-48fe-8e1e-25a8ccfeb761/volumes" Nov 21 16:07:26 crc kubenswrapper[4967]: I1121 16:07:26.554708 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8fe6e979-4546-4f1f-8c36-d57c3bb578bf" path="/var/lib/kubelet/pods/8fe6e979-4546-4f1f-8c36-d57c3bb578bf/volumes" Nov 21 16:07:26 crc kubenswrapper[4967]: I1121 16:07:26.556658 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8fd0d7f-cf04-4fef-8825-4a5f82a76e22" path="/var/lib/kubelet/pods/f8fd0d7f-cf04-4fef-8825-4a5f82a76e22/volumes" Nov 21 16:07:27 crc kubenswrapper[4967]: I1121 16:07:27.356595 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"3a70b103f587ca806a5d8c593187d79e87e580cfd34b5f2ccc6278b03129472e"} Nov 21 16:08:17 crc kubenswrapper[4967]: I1121 16:08:17.886669 4967 generic.go:334] "Generic (PLEG): container finished" podID="ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2" containerID="43567be8e08ed59a089edf38ae8101cd225d3d28b69b14b424f7b4783f7029f2" exitCode=0 Nov 21 16:08:17 crc kubenswrapper[4967]: I1121 16:08:17.886752 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r" event={"ID":"ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2","Type":"ContainerDied","Data":"43567be8e08ed59a089edf38ae8101cd225d3d28b69b14b424f7b4783f7029f2"} Nov 21 16:08:19 crc kubenswrapper[4967]: I1121 16:08:19.434884 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r" Nov 21 16:08:19 crc kubenswrapper[4967]: I1121 16:08:19.510275 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2-inventory\") pod \"ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2\" (UID: \"ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2\") " Nov 21 16:08:19 crc kubenswrapper[4967]: I1121 16:08:19.510437 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5cczs\" (UniqueName: \"kubernetes.io/projected/ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2-kube-api-access-5cczs\") pod \"ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2\" (UID: \"ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2\") " Nov 21 16:08:19 crc kubenswrapper[4967]: I1121 16:08:19.510764 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2-ssh-key\") pod \"ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2\" (UID: \"ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2\") " Nov 21 16:08:19 crc kubenswrapper[4967]: I1121 16:08:19.519796 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2-kube-api-access-5cczs" (OuterVolumeSpecName: "kube-api-access-5cczs") pod "ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2" (UID: "ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2"). InnerVolumeSpecName "kube-api-access-5cczs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:08:19 crc kubenswrapper[4967]: I1121 16:08:19.549420 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2-inventory" (OuterVolumeSpecName: "inventory") pod "ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2" (UID: "ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:08:19 crc kubenswrapper[4967]: I1121 16:08:19.552398 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2" (UID: "ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:08:19 crc kubenswrapper[4967]: I1121 16:08:19.613816 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 16:08:19 crc kubenswrapper[4967]: I1121 16:08:19.613847 4967 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 16:08:19 crc kubenswrapper[4967]: I1121 16:08:19.613857 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5cczs\" (UniqueName: \"kubernetes.io/projected/ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2-kube-api-access-5cczs\") on node \"crc\" DevicePath \"\"" Nov 21 16:08:19 crc kubenswrapper[4967]: I1121 16:08:19.916169 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r" Nov 21 16:08:19 crc kubenswrapper[4967]: I1121 16:08:19.916147 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r" event={"ID":"ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2","Type":"ContainerDied","Data":"cc997291e5a1536aea5a945722b90ee42516c8abafa219eabf744af456af5bfd"} Nov 21 16:08:19 crc kubenswrapper[4967]: I1121 16:08:19.916567 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cc997291e5a1536aea5a945722b90ee42516c8abafa219eabf744af456af5bfd" Nov 21 16:08:19 crc kubenswrapper[4967]: I1121 16:08:19.999293 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc"] Nov 21 16:08:19 crc kubenswrapper[4967]: E1121 16:08:19.999875 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 21 16:08:19 crc kubenswrapper[4967]: I1121 16:08:19.999904 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 21 16:08:20 crc kubenswrapper[4967]: I1121 16:08:20.000243 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 21 16:08:20 crc kubenswrapper[4967]: I1121 16:08:20.002273 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc" Nov 21 16:08:20 crc kubenswrapper[4967]: I1121 16:08:20.005163 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rn5c5" Nov 21 16:08:20 crc kubenswrapper[4967]: I1121 16:08:20.005430 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 16:08:20 crc kubenswrapper[4967]: I1121 16:08:20.005519 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 16:08:20 crc kubenswrapper[4967]: I1121 16:08:20.006586 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 16:08:20 crc kubenswrapper[4967]: I1121 16:08:20.011834 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc"] Nov 21 16:08:20 crc kubenswrapper[4967]: I1121 16:08:20.125685 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0abd8e9d-af5c-4b71-884e-03155f8a630a-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc\" (UID: \"0abd8e9d-af5c-4b71-884e-03155f8a630a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc" Nov 21 16:08:20 crc kubenswrapper[4967]: I1121 16:08:20.126408 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrf29\" (UniqueName: \"kubernetes.io/projected/0abd8e9d-af5c-4b71-884e-03155f8a630a-kube-api-access-xrf29\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc\" (UID: \"0abd8e9d-af5c-4b71-884e-03155f8a630a\") 
" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc" Nov 21 16:08:20 crc kubenswrapper[4967]: I1121 16:08:20.126888 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0abd8e9d-af5c-4b71-884e-03155f8a630a-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc\" (UID: \"0abd8e9d-af5c-4b71-884e-03155f8a630a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc" Nov 21 16:08:20 crc kubenswrapper[4967]: I1121 16:08:20.229551 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0abd8e9d-af5c-4b71-884e-03155f8a630a-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc\" (UID: \"0abd8e9d-af5c-4b71-884e-03155f8a630a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc" Nov 21 16:08:20 crc kubenswrapper[4967]: I1121 16:08:20.229688 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0abd8e9d-af5c-4b71-884e-03155f8a630a-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc\" (UID: \"0abd8e9d-af5c-4b71-884e-03155f8a630a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc" Nov 21 16:08:20 crc kubenswrapper[4967]: I1121 16:08:20.229833 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrf29\" (UniqueName: \"kubernetes.io/projected/0abd8e9d-af5c-4b71-884e-03155f8a630a-kube-api-access-xrf29\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc\" (UID: \"0abd8e9d-af5c-4b71-884e-03155f8a630a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc" Nov 21 16:08:20 crc kubenswrapper[4967]: I1121 16:08:20.236651 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0abd8e9d-af5c-4b71-884e-03155f8a630a-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc\" (UID: \"0abd8e9d-af5c-4b71-884e-03155f8a630a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc" Nov 21 16:08:20 crc kubenswrapper[4967]: I1121 16:08:20.236722 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0abd8e9d-af5c-4b71-884e-03155f8a630a-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc\" (UID: \"0abd8e9d-af5c-4b71-884e-03155f8a630a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc" Nov 21 16:08:20 crc kubenswrapper[4967]: I1121 16:08:20.250419 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrf29\" (UniqueName: \"kubernetes.io/projected/0abd8e9d-af5c-4b71-884e-03155f8a630a-kube-api-access-xrf29\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc\" (UID: \"0abd8e9d-af5c-4b71-884e-03155f8a630a\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc" Nov 21 16:08:20 crc kubenswrapper[4967]: I1121 16:08:20.325852 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc" Nov 21 16:08:20 crc kubenswrapper[4967]: I1121 16:08:20.905775 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc"] Nov 21 16:08:20 crc kubenswrapper[4967]: I1121 16:08:20.926237 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc" event={"ID":"0abd8e9d-af5c-4b71-884e-03155f8a630a","Type":"ContainerStarted","Data":"6bfc8c4c0ff20abaff18c2af115fb1f971d690632314c440853475cd79e524c6"} Nov 21 16:08:21 crc kubenswrapper[4967]: I1121 16:08:21.937481 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc" event={"ID":"0abd8e9d-af5c-4b71-884e-03155f8a630a","Type":"ContainerStarted","Data":"1ad8d293091bc91b69f557c543bdb76af4dbcbc69b305f0baddf6a0096380c90"} Nov 21 16:08:21 crc kubenswrapper[4967]: I1121 16:08:21.952432 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc" podStartSLOduration=2.5544617609999998 podStartE2EDuration="2.95241777s" podCreationTimestamp="2025-11-21 16:08:19 +0000 UTC" firstStartedPulling="2025-11-21 16:08:20.910959655 +0000 UTC m=+1989.169480663" lastFinishedPulling="2025-11-21 16:08:21.308915674 +0000 UTC m=+1989.567436672" observedRunningTime="2025-11-21 16:08:21.949880717 +0000 UTC m=+1990.208401725" watchObservedRunningTime="2025-11-21 16:08:21.95241777 +0000 UTC m=+1990.210938778" Nov 21 16:08:22 crc kubenswrapper[4967]: I1121 16:08:22.259214 4967 scope.go:117] "RemoveContainer" containerID="86d40ebd8df9f146aee1086f634b627cd8746a19100c5cbe08b74e88ed893255" Nov 21 16:08:22 crc kubenswrapper[4967]: I1121 16:08:22.285928 4967 scope.go:117] "RemoveContainer" containerID="7bfd2aa0456954b687486a48f4e829eca9f8e1ccc619b50708eaa953365eb14f" Nov 21 16:08:22 crc kubenswrapper[4967]: I1121 16:08:22.340783 4967 scope.go:117] "RemoveContainer" containerID="64eff180864c1f037e56b88c4f75e3c4ed44a1ec22455367107adc5bb31de9db" Nov 21 16:08:22 crc kubenswrapper[4967]: I1121 16:08:22.404439 4967 scope.go:117] "RemoveContainer" containerID="962abd56387290c95f8600b8a5b53ef4285dcb8d4ef098aaa3a5d5a53b533f65" Nov 21 16:08:22 crc kubenswrapper[4967]: I1121 16:08:22.452500 4967 scope.go:117] "RemoveContainer" containerID="474bed44826e965d976c3d9e8b0d2b739ca8ae2309504f4c69a8d78eaf00d9bf" Nov 21 16:08:22 crc kubenswrapper[4967]: I1121 16:08:22.504903 4967 scope.go:117] "RemoveContainer" containerID="4f34fa2b5e434b0b6be9fe203444c2a143aae4ff35e08de7ea19c220337413f5" Nov 21 16:08:25 crc kubenswrapper[4967]: I1121 16:08:25.039736 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-fhn2k"] Nov 21 16:08:25 crc kubenswrapper[4967]: I1121 16:08:25.050071 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-7569-account-create-lgmgg"] Nov 21 16:08:25 crc kubenswrapper[4967]: I1121 16:08:25.060873 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-fhn2k"] Nov 21 16:08:25 crc kubenswrapper[4967]: I1121 16:08:25.070887 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-7569-account-create-lgmgg"] Nov 21 16:08:26 crc kubenswrapper[4967]: I1121 16:08:26.551040 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="862f7b87-b5b9-4c04-a219-b44da3e3b16d" 
path="/var/lib/kubelet/pods/862f7b87-b5b9-4c04-a219-b44da3e3b16d/volumes" Nov 21 16:08:26 crc kubenswrapper[4967]: I1121 16:08:26.552139 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f95620e9-c5f1-4947-ac6f-9552b76dc96c" path="/var/lib/kubelet/pods/f95620e9-c5f1-4947-ac6f-9552b76dc96c/volumes" Nov 21 16:08:26 crc kubenswrapper[4967]: I1121 16:08:26.995544 4967 generic.go:334] "Generic (PLEG): container finished" podID="0abd8e9d-af5c-4b71-884e-03155f8a630a" containerID="1ad8d293091bc91b69f557c543bdb76af4dbcbc69b305f0baddf6a0096380c90" exitCode=0 Nov 21 16:08:26 crc kubenswrapper[4967]: I1121 16:08:26.995587 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc" event={"ID":"0abd8e9d-af5c-4b71-884e-03155f8a630a","Type":"ContainerDied","Data":"1ad8d293091bc91b69f557c543bdb76af4dbcbc69b305f0baddf6a0096380c90"} Nov 21 16:08:28 crc kubenswrapper[4967]: I1121 16:08:28.508044 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc" Nov 21 16:08:28 crc kubenswrapper[4967]: I1121 16:08:28.637466 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0abd8e9d-af5c-4b71-884e-03155f8a630a-ssh-key\") pod \"0abd8e9d-af5c-4b71-884e-03155f8a630a\" (UID: \"0abd8e9d-af5c-4b71-884e-03155f8a630a\") " Nov 21 16:08:28 crc kubenswrapper[4967]: I1121 16:08:28.637518 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrf29\" (UniqueName: \"kubernetes.io/projected/0abd8e9d-af5c-4b71-884e-03155f8a630a-kube-api-access-xrf29\") pod \"0abd8e9d-af5c-4b71-884e-03155f8a630a\" (UID: \"0abd8e9d-af5c-4b71-884e-03155f8a630a\") " Nov 21 16:08:28 crc kubenswrapper[4967]: I1121 16:08:28.637594 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0abd8e9d-af5c-4b71-884e-03155f8a630a-inventory\") pod \"0abd8e9d-af5c-4b71-884e-03155f8a630a\" (UID: \"0abd8e9d-af5c-4b71-884e-03155f8a630a\") " Nov 21 16:08:28 crc kubenswrapper[4967]: I1121 16:08:28.642821 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0abd8e9d-af5c-4b71-884e-03155f8a630a-kube-api-access-xrf29" (OuterVolumeSpecName: "kube-api-access-xrf29") pod "0abd8e9d-af5c-4b71-884e-03155f8a630a" (UID: "0abd8e9d-af5c-4b71-884e-03155f8a630a"). InnerVolumeSpecName "kube-api-access-xrf29". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:08:28 crc kubenswrapper[4967]: I1121 16:08:28.667761 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0abd8e9d-af5c-4b71-884e-03155f8a630a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0abd8e9d-af5c-4b71-884e-03155f8a630a" (UID: "0abd8e9d-af5c-4b71-884e-03155f8a630a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:08:28 crc kubenswrapper[4967]: I1121 16:08:28.670972 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0abd8e9d-af5c-4b71-884e-03155f8a630a-inventory" (OuterVolumeSpecName: "inventory") pod "0abd8e9d-af5c-4b71-884e-03155f8a630a" (UID: "0abd8e9d-af5c-4b71-884e-03155f8a630a"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:08:28 crc kubenswrapper[4967]: I1121 16:08:28.740760 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0abd8e9d-af5c-4b71-884e-03155f8a630a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 16:08:28 crc kubenswrapper[4967]: I1121 16:08:28.740806 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrf29\" (UniqueName: \"kubernetes.io/projected/0abd8e9d-af5c-4b71-884e-03155f8a630a-kube-api-access-xrf29\") on node \"crc\" DevicePath \"\"" Nov 21 16:08:28 crc kubenswrapper[4967]: I1121 16:08:28.740818 4967 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0abd8e9d-af5c-4b71-884e-03155f8a630a-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.017940 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc" event={"ID":"0abd8e9d-af5c-4b71-884e-03155f8a630a","Type":"ContainerDied","Data":"6bfc8c4c0ff20abaff18c2af115fb1f971d690632314c440853475cd79e524c6"} Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.017992 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6bfc8c4c0ff20abaff18c2af115fb1f971d690632314c440853475cd79e524c6" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.018010 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.100261 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx"] Nov 21 16:08:29 crc kubenswrapper[4967]: E1121 16:08:29.100740 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0abd8e9d-af5c-4b71-884e-03155f8a630a" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.100762 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0abd8e9d-af5c-4b71-884e-03155f8a630a" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.100983 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="0abd8e9d-af5c-4b71-884e-03155f8a630a" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.101795 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.104080 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rn5c5" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.104168 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.105326 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.105551 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.113787 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx"] Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.251212 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba294b04-629f-4369-be8f-07debefffcb8-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-pngrx\" (UID: \"ba294b04-629f-4369-be8f-07debefffcb8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.251341 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwx5r\" (UniqueName: \"kubernetes.io/projected/ba294b04-629f-4369-be8f-07debefffcb8-kube-api-access-hwx5r\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-pngrx\" (UID: \"ba294b04-629f-4369-be8f-07debefffcb8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.251377 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba294b04-629f-4369-be8f-07debefffcb8-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-pngrx\" (UID: \"ba294b04-629f-4369-be8f-07debefffcb8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.353483 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba294b04-629f-4369-be8f-07debefffcb8-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-pngrx\" (UID: \"ba294b04-629f-4369-be8f-07debefffcb8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.353572 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwx5r\" (UniqueName: \"kubernetes.io/projected/ba294b04-629f-4369-be8f-07debefffcb8-kube-api-access-hwx5r\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-pngrx\" (UID: \"ba294b04-629f-4369-be8f-07debefffcb8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.353617 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba294b04-629f-4369-be8f-07debefffcb8-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-pngrx\" (UID: 
\"ba294b04-629f-4369-be8f-07debefffcb8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.359173 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba294b04-629f-4369-be8f-07debefffcb8-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-pngrx\" (UID: \"ba294b04-629f-4369-be8f-07debefffcb8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.364050 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba294b04-629f-4369-be8f-07debefffcb8-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-pngrx\" (UID: \"ba294b04-629f-4369-be8f-07debefffcb8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.372359 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwx5r\" (UniqueName: \"kubernetes.io/projected/ba294b04-629f-4369-be8f-07debefffcb8-kube-api-access-hwx5r\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-pngrx\" (UID: \"ba294b04-629f-4369-be8f-07debefffcb8\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.421405 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx" Nov 21 16:08:29 crc kubenswrapper[4967]: I1121 16:08:29.941740 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx"] Nov 21 16:08:30 crc kubenswrapper[4967]: I1121 16:08:30.028680 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx" event={"ID":"ba294b04-629f-4369-be8f-07debefffcb8","Type":"ContainerStarted","Data":"afeee01c18b3065eb1fee58f7d858120a048343ed85671ea46db733a9c82458f"} Nov 21 16:08:31 crc kubenswrapper[4967]: I1121 16:08:31.045518 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx" event={"ID":"ba294b04-629f-4369-be8f-07debefffcb8","Type":"ContainerStarted","Data":"5c44ec24cabf342bc19c5fcdca984ecd096911b09f11db9e913f7e5b3629f8e6"} Nov 21 16:08:31 crc kubenswrapper[4967]: I1121 16:08:31.070944 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx" podStartSLOduration=1.448242898 podStartE2EDuration="2.070928303s" podCreationTimestamp="2025-11-21 16:08:29 +0000 UTC" firstStartedPulling="2025-11-21 16:08:29.952781389 +0000 UTC m=+1998.211302397" lastFinishedPulling="2025-11-21 16:08:30.575466794 +0000 UTC m=+1998.833987802" observedRunningTime="2025-11-21 16:08:31.060939465 +0000 UTC m=+1999.319460473" watchObservedRunningTime="2025-11-21 16:08:31.070928303 +0000 UTC m=+1999.329449311" Nov 21 16:08:36 crc kubenswrapper[4967]: I1121 16:08:36.047450 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-9djfm"] Nov 21 16:08:36 crc kubenswrapper[4967]: I1121 16:08:36.057212 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-9djfm"] Nov 21 16:08:36 crc kubenswrapper[4967]: I1121 16:08:36.549898 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="612fd1ac-5081-454b-946f-85dff74ddf0c" path="/var/lib/kubelet/pods/612fd1ac-5081-454b-946f-85dff74ddf0c/volumes" Nov 21 16:08:39 crc kubenswrapper[4967]: I1121 16:08:39.031446 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-jcvtj"] Nov 21 16:08:39 crc kubenswrapper[4967]: I1121 16:08:39.044553 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-jcvtj"] Nov 21 16:08:40 crc kubenswrapper[4967]: I1121 16:08:40.549731 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6adf654d-d462-4c77-98c5-33b5a6bd9e44" path="/var/lib/kubelet/pods/6adf654d-d462-4c77-98c5-33b5a6bd9e44/volumes" Nov 21 16:09:07 crc kubenswrapper[4967]: I1121 16:09:07.410405 4967 generic.go:334] "Generic (PLEG): container finished" podID="ba294b04-629f-4369-be8f-07debefffcb8" containerID="5c44ec24cabf342bc19c5fcdca984ecd096911b09f11db9e913f7e5b3629f8e6" exitCode=0 Nov 21 16:09:07 crc kubenswrapper[4967]: I1121 16:09:07.410477 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx" event={"ID":"ba294b04-629f-4369-be8f-07debefffcb8","Type":"ContainerDied","Data":"5c44ec24cabf342bc19c5fcdca984ecd096911b09f11db9e913f7e5b3629f8e6"} Nov 21 16:09:08 crc kubenswrapper[4967]: I1121 16:09:08.894432 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.052344 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-f6s7x"] Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.064975 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-f6s7x"] Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.077747 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba294b04-629f-4369-be8f-07debefffcb8-ssh-key\") pod \"ba294b04-629f-4369-be8f-07debefffcb8\" (UID: \"ba294b04-629f-4369-be8f-07debefffcb8\") " Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.077886 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hwx5r\" (UniqueName: \"kubernetes.io/projected/ba294b04-629f-4369-be8f-07debefffcb8-kube-api-access-hwx5r\") pod \"ba294b04-629f-4369-be8f-07debefffcb8\" (UID: \"ba294b04-629f-4369-be8f-07debefffcb8\") " Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.077975 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba294b04-629f-4369-be8f-07debefffcb8-inventory\") pod \"ba294b04-629f-4369-be8f-07debefffcb8\" (UID: \"ba294b04-629f-4369-be8f-07debefffcb8\") " Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.083909 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba294b04-629f-4369-be8f-07debefffcb8-kube-api-access-hwx5r" (OuterVolumeSpecName: "kube-api-access-hwx5r") pod "ba294b04-629f-4369-be8f-07debefffcb8" (UID: "ba294b04-629f-4369-be8f-07debefffcb8"). InnerVolumeSpecName "kube-api-access-hwx5r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.111528 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba294b04-629f-4369-be8f-07debefffcb8-inventory" (OuterVolumeSpecName: "inventory") pod "ba294b04-629f-4369-be8f-07debefffcb8" (UID: "ba294b04-629f-4369-be8f-07debefffcb8"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.117302 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba294b04-629f-4369-be8f-07debefffcb8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ba294b04-629f-4369-be8f-07debefffcb8" (UID: "ba294b04-629f-4369-be8f-07debefffcb8"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.180140 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba294b04-629f-4369-be8f-07debefffcb8-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.180175 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hwx5r\" (UniqueName: \"kubernetes.io/projected/ba294b04-629f-4369-be8f-07debefffcb8-kube-api-access-hwx5r\") on node \"crc\" DevicePath \"\"" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.180188 4967 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba294b04-629f-4369-be8f-07debefffcb8-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.431337 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx" event={"ID":"ba294b04-629f-4369-be8f-07debefffcb8","Type":"ContainerDied","Data":"afeee01c18b3065eb1fee58f7d858120a048343ed85671ea46db733a9c82458f"} Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.431375 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="afeee01c18b3065eb1fee58f7d858120a048343ed85671ea46db733a9c82458f" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.431391 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-pngrx" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.531787 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2"] Nov 21 16:09:09 crc kubenswrapper[4967]: E1121 16:09:09.532501 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba294b04-629f-4369-be8f-07debefffcb8" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.532526 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba294b04-629f-4369-be8f-07debefffcb8" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.532778 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba294b04-629f-4369-be8f-07debefffcb8" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.533649 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.536678 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.536950 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.538343 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rn5c5" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.538545 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.547096 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2"] Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.588234 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b068be91-0b69-4778-b47a-2ecb6a9c040a-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2\" (UID: \"b068be91-0b69-4778-b47a-2ecb6a9c040a\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.588391 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfzt9\" (UniqueName: \"kubernetes.io/projected/b068be91-0b69-4778-b47a-2ecb6a9c040a-kube-api-access-lfzt9\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2\" (UID: \"b068be91-0b69-4778-b47a-2ecb6a9c040a\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.588475 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b068be91-0b69-4778-b47a-2ecb6a9c040a-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2\" (UID: \"b068be91-0b69-4778-b47a-2ecb6a9c040a\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.690549 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfzt9\" (UniqueName: \"kubernetes.io/projected/b068be91-0b69-4778-b47a-2ecb6a9c040a-kube-api-access-lfzt9\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2\" (UID: \"b068be91-0b69-4778-b47a-2ecb6a9c040a\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.690650 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b068be91-0b69-4778-b47a-2ecb6a9c040a-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2\" (UID: \"b068be91-0b69-4778-b47a-2ecb6a9c040a\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.690784 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b068be91-0b69-4778-b47a-2ecb6a9c040a-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2\" 
(UID: \"b068be91-0b69-4778-b47a-2ecb6a9c040a\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.695706 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b068be91-0b69-4778-b47a-2ecb6a9c040a-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2\" (UID: \"b068be91-0b69-4778-b47a-2ecb6a9c040a\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.697446 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b068be91-0b69-4778-b47a-2ecb6a9c040a-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2\" (UID: \"b068be91-0b69-4778-b47a-2ecb6a9c040a\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.709526 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfzt9\" (UniqueName: \"kubernetes.io/projected/b068be91-0b69-4778-b47a-2ecb6a9c040a-kube-api-access-lfzt9\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2\" (UID: \"b068be91-0b69-4778-b47a-2ecb6a9c040a\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2" Nov 21 16:09:09 crc kubenswrapper[4967]: I1121 16:09:09.862287 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2" Nov 21 16:09:10 crc kubenswrapper[4967]: I1121 16:09:10.422658 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2"] Nov 21 16:09:10 crc kubenswrapper[4967]: I1121 16:09:10.449190 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2" event={"ID":"b068be91-0b69-4778-b47a-2ecb6a9c040a","Type":"ContainerStarted","Data":"1b40b9499c0ff1d1707b80ec566d140119e3ea60c31b42b4de2fe794377a45b4"} Nov 21 16:09:10 crc kubenswrapper[4967]: I1121 16:09:10.551590 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2225c0b-f7f0-4d45-80cd-cde7456d6f15" path="/var/lib/kubelet/pods/c2225c0b-f7f0-4d45-80cd-cde7456d6f15/volumes" Nov 21 16:09:11 crc kubenswrapper[4967]: I1121 16:09:11.461329 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2" event={"ID":"b068be91-0b69-4778-b47a-2ecb6a9c040a","Type":"ContainerStarted","Data":"1652ef09cc4c06f5ebe6b7bdd81c00507576fab4728f947deab76709ce97582b"} Nov 21 16:09:11 crc kubenswrapper[4967]: I1121 16:09:11.480533 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2" podStartSLOduration=2.074368691 podStartE2EDuration="2.480512156s" podCreationTimestamp="2025-11-21 16:09:09 +0000 UTC" firstStartedPulling="2025-11-21 16:09:10.430875306 +0000 UTC m=+2038.689396314" lastFinishedPulling="2025-11-21 16:09:10.837018771 +0000 UTC m=+2039.095539779" observedRunningTime="2025-11-21 16:09:11.475889703 +0000 UTC m=+2039.734410721" watchObservedRunningTime="2025-11-21 16:09:11.480512156 +0000 UTC m=+2039.739033164" Nov 21 16:09:15 crc kubenswrapper[4967]: I1121 16:09:15.034166 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-cell1-conductor-db-sync-z9vxs"] Nov 21 16:09:15 crc kubenswrapper[4967]: I1121 16:09:15.048076 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-z9vxs"] Nov 21 16:09:16 crc kubenswrapper[4967]: I1121 16:09:16.550543 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="827df8c5-068d-48b6-af4d-f971bdacdcb3" path="/var/lib/kubelet/pods/827df8c5-068d-48b6-af4d-f971bdacdcb3/volumes" Nov 21 16:09:22 crc kubenswrapper[4967]: I1121 16:09:22.771328 4967 scope.go:117] "RemoveContainer" containerID="833feda4ee0b6e294c94ad306d993a359e2b8ed014c52490b58f625057a45ad7" Nov 21 16:09:22 crc kubenswrapper[4967]: I1121 16:09:22.805257 4967 scope.go:117] "RemoveContainer" containerID="d2ee832382dc1cc718f6950638826b8fa3c219d55626644d5e70e941327cf3a8" Nov 21 16:09:22 crc kubenswrapper[4967]: I1121 16:09:22.861099 4967 scope.go:117] "RemoveContainer" containerID="836e3046d9e0e13c969f1e742e556cd8f9ecd3056d0193761cfce2ef1a63c2a9" Nov 21 16:09:22 crc kubenswrapper[4967]: I1121 16:09:22.920355 4967 scope.go:117] "RemoveContainer" containerID="70b9fd1a634edec4ed2d2f40eb9cad169a2b3356fada7646857b8e48488baa8c" Nov 21 16:09:22 crc kubenswrapper[4967]: I1121 16:09:22.975221 4967 scope.go:117] "RemoveContainer" containerID="457e37dcd3010e5e4e5e0d136b9876e4dca552466e91884af6e37ff30a286cda" Nov 21 16:09:23 crc kubenswrapper[4967]: I1121 16:09:23.024138 4967 scope.go:117] "RemoveContainer" containerID="e45d4e062cfb62dfb5575a82f61551d76d148cd72b894b09b0c7b3734bff241e" Nov 21 16:09:46 crc kubenswrapper[4967]: I1121 16:09:46.522748 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:09:46 crc kubenswrapper[4967]: I1121 16:09:46.523261 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:09:56 crc kubenswrapper[4967]: I1121 16:09:56.044148 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-vfwfv"] Nov 21 16:09:56 crc kubenswrapper[4967]: I1121 16:09:56.053447 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-vfwfv"] Nov 21 16:09:56 crc kubenswrapper[4967]: I1121 16:09:56.552589 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55d04a95-27d7-409f-a688-f92ddb3e579c" path="/var/lib/kubelet/pods/55d04a95-27d7-409f-a688-f92ddb3e579c/volumes" Nov 21 16:09:59 crc kubenswrapper[4967]: I1121 16:09:59.975011 4967 generic.go:334] "Generic (PLEG): container finished" podID="b068be91-0b69-4778-b47a-2ecb6a9c040a" containerID="1652ef09cc4c06f5ebe6b7bdd81c00507576fab4728f947deab76709ce97582b" exitCode=0 Nov 21 16:09:59 crc kubenswrapper[4967]: I1121 16:09:59.975119 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2" event={"ID":"b068be91-0b69-4778-b47a-2ecb6a9c040a","Type":"ContainerDied","Data":"1652ef09cc4c06f5ebe6b7bdd81c00507576fab4728f947deab76709ce97582b"} Nov 21 16:10:01 crc kubenswrapper[4967]: I1121 16:10:01.513088 4967 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2" Nov 21 16:10:01 crc kubenswrapper[4967]: I1121 16:10:01.637679 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lfzt9\" (UniqueName: \"kubernetes.io/projected/b068be91-0b69-4778-b47a-2ecb6a9c040a-kube-api-access-lfzt9\") pod \"b068be91-0b69-4778-b47a-2ecb6a9c040a\" (UID: \"b068be91-0b69-4778-b47a-2ecb6a9c040a\") " Nov 21 16:10:01 crc kubenswrapper[4967]: I1121 16:10:01.637916 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b068be91-0b69-4778-b47a-2ecb6a9c040a-ssh-key\") pod \"b068be91-0b69-4778-b47a-2ecb6a9c040a\" (UID: \"b068be91-0b69-4778-b47a-2ecb6a9c040a\") " Nov 21 16:10:01 crc kubenswrapper[4967]: I1121 16:10:01.638101 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b068be91-0b69-4778-b47a-2ecb6a9c040a-inventory\") pod \"b068be91-0b69-4778-b47a-2ecb6a9c040a\" (UID: \"b068be91-0b69-4778-b47a-2ecb6a9c040a\") " Nov 21 16:10:01 crc kubenswrapper[4967]: I1121 16:10:01.644061 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b068be91-0b69-4778-b47a-2ecb6a9c040a-kube-api-access-lfzt9" (OuterVolumeSpecName: "kube-api-access-lfzt9") pod "b068be91-0b69-4778-b47a-2ecb6a9c040a" (UID: "b068be91-0b69-4778-b47a-2ecb6a9c040a"). InnerVolumeSpecName "kube-api-access-lfzt9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:10:01 crc kubenswrapper[4967]: E1121 16:10:01.666019 4967 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b068be91-0b69-4778-b47a-2ecb6a9c040a-inventory podName:b068be91-0b69-4778-b47a-2ecb6a9c040a nodeName:}" failed. No retries permitted until 2025-11-21 16:10:02.165992517 +0000 UTC m=+2090.424513525 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "inventory" (UniqueName: "kubernetes.io/secret/b068be91-0b69-4778-b47a-2ecb6a9c040a-inventory") pod "b068be91-0b69-4778-b47a-2ecb6a9c040a" (UID: "b068be91-0b69-4778-b47a-2ecb6a9c040a") : error deleting /var/lib/kubelet/pods/b068be91-0b69-4778-b47a-2ecb6a9c040a/volume-subpaths: remove /var/lib/kubelet/pods/b068be91-0b69-4778-b47a-2ecb6a9c040a/volume-subpaths: no such file or directory Nov 21 16:10:01 crc kubenswrapper[4967]: I1121 16:10:01.668642 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b068be91-0b69-4778-b47a-2ecb6a9c040a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b068be91-0b69-4778-b47a-2ecb6a9c040a" (UID: "b068be91-0b69-4778-b47a-2ecb6a9c040a"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:10:01 crc kubenswrapper[4967]: I1121 16:10:01.741055 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b068be91-0b69-4778-b47a-2ecb6a9c040a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 16:10:01 crc kubenswrapper[4967]: I1121 16:10:01.741093 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lfzt9\" (UniqueName: \"kubernetes.io/projected/b068be91-0b69-4778-b47a-2ecb6a9c040a-kube-api-access-lfzt9\") on node \"crc\" DevicePath \"\"" Nov 21 16:10:01 crc kubenswrapper[4967]: I1121 16:10:01.996584 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2" event={"ID":"b068be91-0b69-4778-b47a-2ecb6a9c040a","Type":"ContainerDied","Data":"1b40b9499c0ff1d1707b80ec566d140119e3ea60c31b42b4de2fe794377a45b4"} Nov 21 16:10:01 crc kubenswrapper[4967]: I1121 16:10:01.996635 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1b40b9499c0ff1d1707b80ec566d140119e3ea60c31b42b4de2fe794377a45b4" Nov 21 16:10:01 crc kubenswrapper[4967]: I1121 16:10:01.996648 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2" Nov 21 16:10:02 crc kubenswrapper[4967]: I1121 16:10:02.081839 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-pv72w"] Nov 21 16:10:02 crc kubenswrapper[4967]: E1121 16:10:02.082544 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b068be91-0b69-4778-b47a-2ecb6a9c040a" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 21 16:10:02 crc kubenswrapper[4967]: I1121 16:10:02.082571 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b068be91-0b69-4778-b47a-2ecb6a9c040a" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 21 16:10:02 crc kubenswrapper[4967]: I1121 16:10:02.082901 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="b068be91-0b69-4778-b47a-2ecb6a9c040a" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 21 16:10:02 crc kubenswrapper[4967]: I1121 16:10:02.083963 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-pv72w" Nov 21 16:10:02 crc kubenswrapper[4967]: I1121 16:10:02.105192 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-pv72w"] Nov 21 16:10:02 crc kubenswrapper[4967]: I1121 16:10:02.150293 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/11231449-813f-4d1a-846e-997b1de5349f-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-pv72w\" (UID: \"11231449-813f-4d1a-846e-997b1de5349f\") " pod="openstack/ssh-known-hosts-edpm-deployment-pv72w" Nov 21 16:10:02 crc kubenswrapper[4967]: I1121 16:10:02.150374 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/11231449-813f-4d1a-846e-997b1de5349f-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-pv72w\" (UID: \"11231449-813f-4d1a-846e-997b1de5349f\") " pod="openstack/ssh-known-hosts-edpm-deployment-pv72w" Nov 21 16:10:02 crc kubenswrapper[4967]: I1121 16:10:02.150431 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wg2l5\" (UniqueName: \"kubernetes.io/projected/11231449-813f-4d1a-846e-997b1de5349f-kube-api-access-wg2l5\") pod \"ssh-known-hosts-edpm-deployment-pv72w\" (UID: \"11231449-813f-4d1a-846e-997b1de5349f\") " pod="openstack/ssh-known-hosts-edpm-deployment-pv72w" Nov 21 16:10:02 crc kubenswrapper[4967]: I1121 16:10:02.252483 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b068be91-0b69-4778-b47a-2ecb6a9c040a-inventory\") pod \"b068be91-0b69-4778-b47a-2ecb6a9c040a\" (UID: \"b068be91-0b69-4778-b47a-2ecb6a9c040a\") " Nov 21 16:10:02 crc kubenswrapper[4967]: I1121 16:10:02.253107 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/11231449-813f-4d1a-846e-997b1de5349f-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-pv72w\" (UID: \"11231449-813f-4d1a-846e-997b1de5349f\") " pod="openstack/ssh-known-hosts-edpm-deployment-pv72w" Nov 21 16:10:02 crc kubenswrapper[4967]: I1121 16:10:02.253192 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wg2l5\" (UniqueName: \"kubernetes.io/projected/11231449-813f-4d1a-846e-997b1de5349f-kube-api-access-wg2l5\") pod \"ssh-known-hosts-edpm-deployment-pv72w\" (UID: \"11231449-813f-4d1a-846e-997b1de5349f\") " pod="openstack/ssh-known-hosts-edpm-deployment-pv72w" Nov 21 16:10:02 crc kubenswrapper[4967]: I1121 16:10:02.253454 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/11231449-813f-4d1a-846e-997b1de5349f-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-pv72w\" (UID: \"11231449-813f-4d1a-846e-997b1de5349f\") " pod="openstack/ssh-known-hosts-edpm-deployment-pv72w" Nov 21 16:10:02 crc kubenswrapper[4967]: I1121 16:10:02.256360 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b068be91-0b69-4778-b47a-2ecb6a9c040a-inventory" (OuterVolumeSpecName: "inventory") pod "b068be91-0b69-4778-b47a-2ecb6a9c040a" (UID: "b068be91-0b69-4778-b47a-2ecb6a9c040a"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:10:02 crc kubenswrapper[4967]: I1121 16:10:02.257164 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/11231449-813f-4d1a-846e-997b1de5349f-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-pv72w\" (UID: \"11231449-813f-4d1a-846e-997b1de5349f\") " pod="openstack/ssh-known-hosts-edpm-deployment-pv72w" Nov 21 16:10:02 crc kubenswrapper[4967]: I1121 16:10:02.257668 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/11231449-813f-4d1a-846e-997b1de5349f-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-pv72w\" (UID: \"11231449-813f-4d1a-846e-997b1de5349f\") " pod="openstack/ssh-known-hosts-edpm-deployment-pv72w" Nov 21 16:10:02 crc kubenswrapper[4967]: I1121 16:10:02.271258 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wg2l5\" (UniqueName: \"kubernetes.io/projected/11231449-813f-4d1a-846e-997b1de5349f-kube-api-access-wg2l5\") pod \"ssh-known-hosts-edpm-deployment-pv72w\" (UID: \"11231449-813f-4d1a-846e-997b1de5349f\") " pod="openstack/ssh-known-hosts-edpm-deployment-pv72w" Nov 21 16:10:02 crc kubenswrapper[4967]: I1121 16:10:02.356021 4967 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b068be91-0b69-4778-b47a-2ecb6a9c040a-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 16:10:02 crc kubenswrapper[4967]: I1121 16:10:02.402138 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-pv72w" Nov 21 16:10:02 crc kubenswrapper[4967]: I1121 16:10:02.922136 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-pv72w"] Nov 21 16:10:03 crc kubenswrapper[4967]: I1121 16:10:03.016027 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-pv72w" event={"ID":"11231449-813f-4d1a-846e-997b1de5349f","Type":"ContainerStarted","Data":"73a06b1bab0f5d52165d53110b7c25741fd9064f2fef6af2325e6efdead270ce"} Nov 21 16:10:05 crc kubenswrapper[4967]: I1121 16:10:05.039973 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-pv72w" event={"ID":"11231449-813f-4d1a-846e-997b1de5349f","Type":"ContainerStarted","Data":"ec8d695d01ee4244788f83a0c405311f5014368e9cb1615edd7d8d08ca45f3bb"} Nov 21 16:10:05 crc kubenswrapper[4967]: I1121 16:10:05.060789 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-pv72w" podStartSLOduration=2.010362532 podStartE2EDuration="3.060769644s" podCreationTimestamp="2025-11-21 16:10:02 +0000 UTC" firstStartedPulling="2025-11-21 16:10:02.930369527 +0000 UTC m=+2091.188890535" lastFinishedPulling="2025-11-21 16:10:03.980776639 +0000 UTC m=+2092.239297647" observedRunningTime="2025-11-21 16:10:05.055617225 +0000 UTC m=+2093.314138233" watchObservedRunningTime="2025-11-21 16:10:05.060769644 +0000 UTC m=+2093.319290652" Nov 21 16:10:11 crc kubenswrapper[4967]: I1121 16:10:11.112044 4967 generic.go:334] "Generic (PLEG): container finished" podID="11231449-813f-4d1a-846e-997b1de5349f" containerID="ec8d695d01ee4244788f83a0c405311f5014368e9cb1615edd7d8d08ca45f3bb" exitCode=0 Nov 21 16:10:11 crc kubenswrapper[4967]: I1121 16:10:11.112143 4967 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/ssh-known-hosts-edpm-deployment-pv72w" event={"ID":"11231449-813f-4d1a-846e-997b1de5349f","Type":"ContainerDied","Data":"ec8d695d01ee4244788f83a0c405311f5014368e9cb1615edd7d8d08ca45f3bb"} Nov 21 16:10:12 crc kubenswrapper[4967]: I1121 16:10:12.687242 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-pv72w" Nov 21 16:10:12 crc kubenswrapper[4967]: I1121 16:10:12.811653 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/11231449-813f-4d1a-846e-997b1de5349f-ssh-key-openstack-edpm-ipam\") pod \"11231449-813f-4d1a-846e-997b1de5349f\" (UID: \"11231449-813f-4d1a-846e-997b1de5349f\") " Nov 21 16:10:12 crc kubenswrapper[4967]: I1121 16:10:12.812139 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/11231449-813f-4d1a-846e-997b1de5349f-inventory-0\") pod \"11231449-813f-4d1a-846e-997b1de5349f\" (UID: \"11231449-813f-4d1a-846e-997b1de5349f\") " Nov 21 16:10:12 crc kubenswrapper[4967]: I1121 16:10:12.812505 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wg2l5\" (UniqueName: \"kubernetes.io/projected/11231449-813f-4d1a-846e-997b1de5349f-kube-api-access-wg2l5\") pod \"11231449-813f-4d1a-846e-997b1de5349f\" (UID: \"11231449-813f-4d1a-846e-997b1de5349f\") " Nov 21 16:10:12 crc kubenswrapper[4967]: I1121 16:10:12.817118 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11231449-813f-4d1a-846e-997b1de5349f-kube-api-access-wg2l5" (OuterVolumeSpecName: "kube-api-access-wg2l5") pod "11231449-813f-4d1a-846e-997b1de5349f" (UID: "11231449-813f-4d1a-846e-997b1de5349f"). InnerVolumeSpecName "kube-api-access-wg2l5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:10:12 crc kubenswrapper[4967]: I1121 16:10:12.847474 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11231449-813f-4d1a-846e-997b1de5349f-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "11231449-813f-4d1a-846e-997b1de5349f" (UID: "11231449-813f-4d1a-846e-997b1de5349f"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:10:12 crc kubenswrapper[4967]: I1121 16:10:12.848480 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11231449-813f-4d1a-846e-997b1de5349f-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "11231449-813f-4d1a-846e-997b1de5349f" (UID: "11231449-813f-4d1a-846e-997b1de5349f"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:10:12 crc kubenswrapper[4967]: I1121 16:10:12.915100 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wg2l5\" (UniqueName: \"kubernetes.io/projected/11231449-813f-4d1a-846e-997b1de5349f-kube-api-access-wg2l5\") on node \"crc\" DevicePath \"\"" Nov 21 16:10:12 crc kubenswrapper[4967]: I1121 16:10:12.915146 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/11231449-813f-4d1a-846e-997b1de5349f-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 21 16:10:12 crc kubenswrapper[4967]: I1121 16:10:12.915161 4967 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/11231449-813f-4d1a-846e-997b1de5349f-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.133560 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-pv72w" event={"ID":"11231449-813f-4d1a-846e-997b1de5349f","Type":"ContainerDied","Data":"73a06b1bab0f5d52165d53110b7c25741fd9064f2fef6af2325e6efdead270ce"} Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.133857 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="73a06b1bab0f5d52165d53110b7c25741fd9064f2fef6af2325e6efdead270ce" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.133608 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-pv72w" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.203957 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh"] Nov 21 16:10:13 crc kubenswrapper[4967]: E1121 16:10:13.204832 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11231449-813f-4d1a-846e-997b1de5349f" containerName="ssh-known-hosts-edpm-deployment" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.204860 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="11231449-813f-4d1a-846e-997b1de5349f" containerName="ssh-known-hosts-edpm-deployment" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.205150 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="11231449-813f-4d1a-846e-997b1de5349f" containerName="ssh-known-hosts-edpm-deployment" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.206175 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.209049 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rn5c5" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.209081 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.209400 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.209500 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.226021 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh"] Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.323440 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbktn\" (UniqueName: \"kubernetes.io/projected/8a6f4649-48ad-45af-90ca-ddf024c34a33-kube-api-access-kbktn\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-pg7sh\" (UID: \"8a6f4649-48ad-45af-90ca-ddf024c34a33\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.323506 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8a6f4649-48ad-45af-90ca-ddf024c34a33-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-pg7sh\" (UID: \"8a6f4649-48ad-45af-90ca-ddf024c34a33\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.323745 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8a6f4649-48ad-45af-90ca-ddf024c34a33-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-pg7sh\" (UID: \"8a6f4649-48ad-45af-90ca-ddf024c34a33\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.410179 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jdwzg"] Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.413079 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jdwzg" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.422917 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jdwzg"] Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.426497 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbktn\" (UniqueName: \"kubernetes.io/projected/8a6f4649-48ad-45af-90ca-ddf024c34a33-kube-api-access-kbktn\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-pg7sh\" (UID: \"8a6f4649-48ad-45af-90ca-ddf024c34a33\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.426556 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8a6f4649-48ad-45af-90ca-ddf024c34a33-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-pg7sh\" (UID: \"8a6f4649-48ad-45af-90ca-ddf024c34a33\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.426657 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8a6f4649-48ad-45af-90ca-ddf024c34a33-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-pg7sh\" (UID: \"8a6f4649-48ad-45af-90ca-ddf024c34a33\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.452655 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbktn\" (UniqueName: \"kubernetes.io/projected/8a6f4649-48ad-45af-90ca-ddf024c34a33-kube-api-access-kbktn\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-pg7sh\" (UID: \"8a6f4649-48ad-45af-90ca-ddf024c34a33\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.453279 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8a6f4649-48ad-45af-90ca-ddf024c34a33-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-pg7sh\" (UID: \"8a6f4649-48ad-45af-90ca-ddf024c34a33\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.468014 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8a6f4649-48ad-45af-90ca-ddf024c34a33-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-pg7sh\" (UID: \"8a6f4649-48ad-45af-90ca-ddf024c34a33\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.527725 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.528763 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58f9fa94-1160-419c-87f2-0b0a51d6d44c-catalog-content\") pod \"redhat-marketplace-jdwzg\" (UID: \"58f9fa94-1160-419c-87f2-0b0a51d6d44c\") " pod="openshift-marketplace/redhat-marketplace-jdwzg" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.528822 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hln8l\" (UniqueName: \"kubernetes.io/projected/58f9fa94-1160-419c-87f2-0b0a51d6d44c-kube-api-access-hln8l\") pod \"redhat-marketplace-jdwzg\" (UID: \"58f9fa94-1160-419c-87f2-0b0a51d6d44c\") " pod="openshift-marketplace/redhat-marketplace-jdwzg" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.528870 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58f9fa94-1160-419c-87f2-0b0a51d6d44c-utilities\") pod \"redhat-marketplace-jdwzg\" (UID: \"58f9fa94-1160-419c-87f2-0b0a51d6d44c\") " pod="openshift-marketplace/redhat-marketplace-jdwzg" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.631301 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58f9fa94-1160-419c-87f2-0b0a51d6d44c-catalog-content\") pod \"redhat-marketplace-jdwzg\" (UID: \"58f9fa94-1160-419c-87f2-0b0a51d6d44c\") " pod="openshift-marketplace/redhat-marketplace-jdwzg" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.631576 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hln8l\" (UniqueName: \"kubernetes.io/projected/58f9fa94-1160-419c-87f2-0b0a51d6d44c-kube-api-access-hln8l\") pod \"redhat-marketplace-jdwzg\" (UID: \"58f9fa94-1160-419c-87f2-0b0a51d6d44c\") " pod="openshift-marketplace/redhat-marketplace-jdwzg" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.631631 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58f9fa94-1160-419c-87f2-0b0a51d6d44c-utilities\") pod \"redhat-marketplace-jdwzg\" (UID: \"58f9fa94-1160-419c-87f2-0b0a51d6d44c\") " pod="openshift-marketplace/redhat-marketplace-jdwzg" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.634362 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58f9fa94-1160-419c-87f2-0b0a51d6d44c-utilities\") pod \"redhat-marketplace-jdwzg\" (UID: \"58f9fa94-1160-419c-87f2-0b0a51d6d44c\") " pod="openshift-marketplace/redhat-marketplace-jdwzg" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.634884 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58f9fa94-1160-419c-87f2-0b0a51d6d44c-catalog-content\") pod \"redhat-marketplace-jdwzg\" (UID: \"58f9fa94-1160-419c-87f2-0b0a51d6d44c\") " pod="openshift-marketplace/redhat-marketplace-jdwzg" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.652564 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hln8l\" (UniqueName: \"kubernetes.io/projected/58f9fa94-1160-419c-87f2-0b0a51d6d44c-kube-api-access-hln8l\") pod 
\"redhat-marketplace-jdwzg\" (UID: \"58f9fa94-1160-419c-87f2-0b0a51d6d44c\") " pod="openshift-marketplace/redhat-marketplace-jdwzg" Nov 21 16:10:13 crc kubenswrapper[4967]: I1121 16:10:13.757818 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jdwzg" Nov 21 16:10:14 crc kubenswrapper[4967]: I1121 16:10:14.172260 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh"] Nov 21 16:10:14 crc kubenswrapper[4967]: I1121 16:10:14.248692 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jdwzg"] Nov 21 16:10:14 crc kubenswrapper[4967]: W1121 16:10:14.252523 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod58f9fa94_1160_419c_87f2_0b0a51d6d44c.slice/crio-7cab7bbc37cdb13c6576dca1ceb2a196ba58a64a66dad85868ce7a33c23e2157 WatchSource:0}: Error finding container 7cab7bbc37cdb13c6576dca1ceb2a196ba58a64a66dad85868ce7a33c23e2157: Status 404 returned error can't find the container with id 7cab7bbc37cdb13c6576dca1ceb2a196ba58a64a66dad85868ce7a33c23e2157 Nov 21 16:10:15 crc kubenswrapper[4967]: I1121 16:10:15.156609 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh" event={"ID":"8a6f4649-48ad-45af-90ca-ddf024c34a33","Type":"ContainerStarted","Data":"c7ada69fc3bceb059ece5e8e67bd8a04638eafd8644d1fc99e103f82a216a763"} Nov 21 16:10:15 crc kubenswrapper[4967]: I1121 16:10:15.157358 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh" event={"ID":"8a6f4649-48ad-45af-90ca-ddf024c34a33","Type":"ContainerStarted","Data":"88f3444fefb7b4aaf6c08f3788d8f808f77ae9253e494786fd3c6bdc3f560394"} Nov 21 16:10:15 crc kubenswrapper[4967]: I1121 16:10:15.158247 4967 generic.go:334] "Generic (PLEG): container finished" podID="58f9fa94-1160-419c-87f2-0b0a51d6d44c" containerID="b70416f54aa9ca027383e69c58f9e9d21f938e437548d715a358f5d4f338bbe2" exitCode=0 Nov 21 16:10:15 crc kubenswrapper[4967]: I1121 16:10:15.158285 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jdwzg" event={"ID":"58f9fa94-1160-419c-87f2-0b0a51d6d44c","Type":"ContainerDied","Data":"b70416f54aa9ca027383e69c58f9e9d21f938e437548d715a358f5d4f338bbe2"} Nov 21 16:10:15 crc kubenswrapper[4967]: I1121 16:10:15.158330 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jdwzg" event={"ID":"58f9fa94-1160-419c-87f2-0b0a51d6d44c","Type":"ContainerStarted","Data":"7cab7bbc37cdb13c6576dca1ceb2a196ba58a64a66dad85868ce7a33c23e2157"} Nov 21 16:10:15 crc kubenswrapper[4967]: I1121 16:10:15.176052 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh" podStartSLOduration=1.6458333120000002 podStartE2EDuration="2.175638061s" podCreationTimestamp="2025-11-21 16:10:13 +0000 UTC" firstStartedPulling="2025-11-21 16:10:14.193194907 +0000 UTC m=+2102.451715915" lastFinishedPulling="2025-11-21 16:10:14.722999656 +0000 UTC m=+2102.981520664" observedRunningTime="2025-11-21 16:10:15.17074126 +0000 UTC m=+2103.429262268" watchObservedRunningTime="2025-11-21 16:10:15.175638061 +0000 UTC m=+2103.434159069" Nov 21 16:10:16 crc kubenswrapper[4967]: I1121 16:10:16.180407 4967 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-marketplace/redhat-marketplace-jdwzg" event={"ID":"58f9fa94-1160-419c-87f2-0b0a51d6d44c","Type":"ContainerStarted","Data":"edf1521b971d9f51c6880a9ed75aa81f9f09ee3025d5e85771086d09efc4b23b"} Nov 21 16:10:16 crc kubenswrapper[4967]: I1121 16:10:16.522889 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:10:16 crc kubenswrapper[4967]: I1121 16:10:16.523240 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:10:18 crc kubenswrapper[4967]: I1121 16:10:18.207140 4967 generic.go:334] "Generic (PLEG): container finished" podID="58f9fa94-1160-419c-87f2-0b0a51d6d44c" containerID="edf1521b971d9f51c6880a9ed75aa81f9f09ee3025d5e85771086d09efc4b23b" exitCode=0 Nov 21 16:10:18 crc kubenswrapper[4967]: I1121 16:10:18.207180 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jdwzg" event={"ID":"58f9fa94-1160-419c-87f2-0b0a51d6d44c","Type":"ContainerDied","Data":"edf1521b971d9f51c6880a9ed75aa81f9f09ee3025d5e85771086d09efc4b23b"} Nov 21 16:10:19 crc kubenswrapper[4967]: I1121 16:10:19.219160 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jdwzg" event={"ID":"58f9fa94-1160-419c-87f2-0b0a51d6d44c","Type":"ContainerStarted","Data":"1617496a8500f32074ce75195e7a53c8c921a53e46a9f9c06d638ae67a2e1fac"} Nov 21 16:10:19 crc kubenswrapper[4967]: I1121 16:10:19.235397 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jdwzg" podStartSLOduration=2.786938279 podStartE2EDuration="6.235377733s" podCreationTimestamp="2025-11-21 16:10:13 +0000 UTC" firstStartedPulling="2025-11-21 16:10:15.160086673 +0000 UTC m=+2103.418607681" lastFinishedPulling="2025-11-21 16:10:18.608526127 +0000 UTC m=+2106.867047135" observedRunningTime="2025-11-21 16:10:19.234145657 +0000 UTC m=+2107.492666685" watchObservedRunningTime="2025-11-21 16:10:19.235377733 +0000 UTC m=+2107.493898741" Nov 21 16:10:23 crc kubenswrapper[4967]: I1121 16:10:23.231512 4967 scope.go:117] "RemoveContainer" containerID="a053ea3bc8d99e9e237f2de86c3ba08e975e5680c5a89a2f46f05e4701c93d62" Nov 21 16:10:23 crc kubenswrapper[4967]: I1121 16:10:23.261048 4967 generic.go:334] "Generic (PLEG): container finished" podID="8a6f4649-48ad-45af-90ca-ddf024c34a33" containerID="c7ada69fc3bceb059ece5e8e67bd8a04638eafd8644d1fc99e103f82a216a763" exitCode=0 Nov 21 16:10:23 crc kubenswrapper[4967]: I1121 16:10:23.261093 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh" event={"ID":"8a6f4649-48ad-45af-90ca-ddf024c34a33","Type":"ContainerDied","Data":"c7ada69fc3bceb059ece5e8e67bd8a04638eafd8644d1fc99e103f82a216a763"} Nov 21 16:10:23 crc kubenswrapper[4967]: I1121 16:10:23.758033 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jdwzg" Nov 21 16:10:23 crc kubenswrapper[4967]: I1121 16:10:23.758389 4967 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jdwzg" Nov 21 16:10:23 crc kubenswrapper[4967]: I1121 16:10:23.816134 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jdwzg" Nov 21 16:10:24 crc kubenswrapper[4967]: I1121 16:10:24.328707 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jdwzg" Nov 21 16:10:24 crc kubenswrapper[4967]: I1121 16:10:24.389076 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jdwzg"] Nov 21 16:10:24 crc kubenswrapper[4967]: I1121 16:10:24.773054 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh" Nov 21 16:10:24 crc kubenswrapper[4967]: I1121 16:10:24.806032 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8a6f4649-48ad-45af-90ca-ddf024c34a33-inventory\") pod \"8a6f4649-48ad-45af-90ca-ddf024c34a33\" (UID: \"8a6f4649-48ad-45af-90ca-ddf024c34a33\") " Nov 21 16:10:24 crc kubenswrapper[4967]: I1121 16:10:24.806259 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8a6f4649-48ad-45af-90ca-ddf024c34a33-ssh-key\") pod \"8a6f4649-48ad-45af-90ca-ddf024c34a33\" (UID: \"8a6f4649-48ad-45af-90ca-ddf024c34a33\") " Nov 21 16:10:24 crc kubenswrapper[4967]: I1121 16:10:24.806475 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kbktn\" (UniqueName: \"kubernetes.io/projected/8a6f4649-48ad-45af-90ca-ddf024c34a33-kube-api-access-kbktn\") pod \"8a6f4649-48ad-45af-90ca-ddf024c34a33\" (UID: \"8a6f4649-48ad-45af-90ca-ddf024c34a33\") " Nov 21 16:10:24 crc kubenswrapper[4967]: I1121 16:10:24.812703 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a6f4649-48ad-45af-90ca-ddf024c34a33-kube-api-access-kbktn" (OuterVolumeSpecName: "kube-api-access-kbktn") pod "8a6f4649-48ad-45af-90ca-ddf024c34a33" (UID: "8a6f4649-48ad-45af-90ca-ddf024c34a33"). InnerVolumeSpecName "kube-api-access-kbktn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:10:24 crc kubenswrapper[4967]: I1121 16:10:24.840895 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a6f4649-48ad-45af-90ca-ddf024c34a33-inventory" (OuterVolumeSpecName: "inventory") pod "8a6f4649-48ad-45af-90ca-ddf024c34a33" (UID: "8a6f4649-48ad-45af-90ca-ddf024c34a33"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:10:24 crc kubenswrapper[4967]: I1121 16:10:24.842954 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a6f4649-48ad-45af-90ca-ddf024c34a33-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8a6f4649-48ad-45af-90ca-ddf024c34a33" (UID: "8a6f4649-48ad-45af-90ca-ddf024c34a33"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:10:24 crc kubenswrapper[4967]: I1121 16:10:24.910572 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kbktn\" (UniqueName: \"kubernetes.io/projected/8a6f4649-48ad-45af-90ca-ddf024c34a33-kube-api-access-kbktn\") on node \"crc\" DevicePath \"\"" Nov 21 16:10:24 crc kubenswrapper[4967]: I1121 16:10:24.910620 4967 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8a6f4649-48ad-45af-90ca-ddf024c34a33-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 16:10:24 crc kubenswrapper[4967]: I1121 16:10:24.910632 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8a6f4649-48ad-45af-90ca-ddf024c34a33-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.281819 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.281821 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-pg7sh" event={"ID":"8a6f4649-48ad-45af-90ca-ddf024c34a33","Type":"ContainerDied","Data":"88f3444fefb7b4aaf6c08f3788d8f808f77ae9253e494786fd3c6bdc3f560394"} Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.282191 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="88f3444fefb7b4aaf6c08f3788d8f808f77ae9253e494786fd3c6bdc3f560394" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.351942 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9"] Nov 21 16:10:25 crc kubenswrapper[4967]: E1121 16:10:25.352562 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a6f4649-48ad-45af-90ca-ddf024c34a33" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.352581 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a6f4649-48ad-45af-90ca-ddf024c34a33" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.352904 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a6f4649-48ad-45af-90ca-ddf024c34a33" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.353834 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.356155 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.358494 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.358624 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rn5c5" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.358673 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.363168 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9"] Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.422497 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6559f1f0-c99e-49ba-8108-d57a8bf60d33-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9\" (UID: \"6559f1f0-c99e-49ba-8108-d57a8bf60d33\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.422570 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmk2t\" (UniqueName: \"kubernetes.io/projected/6559f1f0-c99e-49ba-8108-d57a8bf60d33-kube-api-access-rmk2t\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9\" (UID: \"6559f1f0-c99e-49ba-8108-d57a8bf60d33\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.423113 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6559f1f0-c99e-49ba-8108-d57a8bf60d33-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9\" (UID: \"6559f1f0-c99e-49ba-8108-d57a8bf60d33\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.525419 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6559f1f0-c99e-49ba-8108-d57a8bf60d33-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9\" (UID: \"6559f1f0-c99e-49ba-8108-d57a8bf60d33\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.525549 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6559f1f0-c99e-49ba-8108-d57a8bf60d33-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9\" (UID: \"6559f1f0-c99e-49ba-8108-d57a8bf60d33\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.525607 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmk2t\" (UniqueName: \"kubernetes.io/projected/6559f1f0-c99e-49ba-8108-d57a8bf60d33-kube-api-access-rmk2t\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9\" (UID: 
\"6559f1f0-c99e-49ba-8108-d57a8bf60d33\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.529228 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6559f1f0-c99e-49ba-8108-d57a8bf60d33-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9\" (UID: \"6559f1f0-c99e-49ba-8108-d57a8bf60d33\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.530340 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6559f1f0-c99e-49ba-8108-d57a8bf60d33-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9\" (UID: \"6559f1f0-c99e-49ba-8108-d57a8bf60d33\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.545996 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmk2t\" (UniqueName: \"kubernetes.io/projected/6559f1f0-c99e-49ba-8108-d57a8bf60d33-kube-api-access-rmk2t\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9\" (UID: \"6559f1f0-c99e-49ba-8108-d57a8bf60d33\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9" Nov 21 16:10:25 crc kubenswrapper[4967]: I1121 16:10:25.680960 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9" Nov 21 16:10:26 crc kubenswrapper[4967]: I1121 16:10:26.202582 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9"] Nov 21 16:10:26 crc kubenswrapper[4967]: I1121 16:10:26.292870 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9" event={"ID":"6559f1f0-c99e-49ba-8108-d57a8bf60d33","Type":"ContainerStarted","Data":"248bd6fa1620372844434d00c47e32b3e7cf421dff2753045bdaa642a5a3039c"} Nov 21 16:10:26 crc kubenswrapper[4967]: I1121 16:10:26.293044 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jdwzg" podUID="58f9fa94-1160-419c-87f2-0b0a51d6d44c" containerName="registry-server" containerID="cri-o://1617496a8500f32074ce75195e7a53c8c921a53e46a9f9c06d638ae67a2e1fac" gracePeriod=2 Nov 21 16:10:26 crc kubenswrapper[4967]: I1121 16:10:26.909888 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jdwzg" Nov 21 16:10:26 crc kubenswrapper[4967]: I1121 16:10:26.966045 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hln8l\" (UniqueName: \"kubernetes.io/projected/58f9fa94-1160-419c-87f2-0b0a51d6d44c-kube-api-access-hln8l\") pod \"58f9fa94-1160-419c-87f2-0b0a51d6d44c\" (UID: \"58f9fa94-1160-419c-87f2-0b0a51d6d44c\") " Nov 21 16:10:26 crc kubenswrapper[4967]: I1121 16:10:26.966577 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58f9fa94-1160-419c-87f2-0b0a51d6d44c-catalog-content\") pod \"58f9fa94-1160-419c-87f2-0b0a51d6d44c\" (UID: \"58f9fa94-1160-419c-87f2-0b0a51d6d44c\") " Nov 21 16:10:26 crc kubenswrapper[4967]: I1121 16:10:26.966941 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58f9fa94-1160-419c-87f2-0b0a51d6d44c-utilities\") pod \"58f9fa94-1160-419c-87f2-0b0a51d6d44c\" (UID: \"58f9fa94-1160-419c-87f2-0b0a51d6d44c\") " Nov 21 16:10:26 crc kubenswrapper[4967]: I1121 16:10:26.967667 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58f9fa94-1160-419c-87f2-0b0a51d6d44c-utilities" (OuterVolumeSpecName: "utilities") pod "58f9fa94-1160-419c-87f2-0b0a51d6d44c" (UID: "58f9fa94-1160-419c-87f2-0b0a51d6d44c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:10:26 crc kubenswrapper[4967]: I1121 16:10:26.968012 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/58f9fa94-1160-419c-87f2-0b0a51d6d44c-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 16:10:26 crc kubenswrapper[4967]: I1121 16:10:26.971558 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58f9fa94-1160-419c-87f2-0b0a51d6d44c-kube-api-access-hln8l" (OuterVolumeSpecName: "kube-api-access-hln8l") pod "58f9fa94-1160-419c-87f2-0b0a51d6d44c" (UID: "58f9fa94-1160-419c-87f2-0b0a51d6d44c"). InnerVolumeSpecName "kube-api-access-hln8l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:10:26 crc kubenswrapper[4967]: I1121 16:10:26.988037 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58f9fa94-1160-419c-87f2-0b0a51d6d44c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "58f9fa94-1160-419c-87f2-0b0a51d6d44c" (UID: "58f9fa94-1160-419c-87f2-0b0a51d6d44c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:10:27 crc kubenswrapper[4967]: I1121 16:10:27.069874 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hln8l\" (UniqueName: \"kubernetes.io/projected/58f9fa94-1160-419c-87f2-0b0a51d6d44c-kube-api-access-hln8l\") on node \"crc\" DevicePath \"\"" Nov 21 16:10:27 crc kubenswrapper[4967]: I1121 16:10:27.069905 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/58f9fa94-1160-419c-87f2-0b0a51d6d44c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 16:10:27 crc kubenswrapper[4967]: I1121 16:10:27.305572 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9" event={"ID":"6559f1f0-c99e-49ba-8108-d57a8bf60d33","Type":"ContainerStarted","Data":"6c6345601c4c87d09bba76a29611ecbc762762e7a676259da6a92e0171ea342b"} Nov 21 16:10:27 crc kubenswrapper[4967]: I1121 16:10:27.307857 4967 generic.go:334] "Generic (PLEG): container finished" podID="58f9fa94-1160-419c-87f2-0b0a51d6d44c" containerID="1617496a8500f32074ce75195e7a53c8c921a53e46a9f9c06d638ae67a2e1fac" exitCode=0 Nov 21 16:10:27 crc kubenswrapper[4967]: I1121 16:10:27.307909 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jdwzg" event={"ID":"58f9fa94-1160-419c-87f2-0b0a51d6d44c","Type":"ContainerDied","Data":"1617496a8500f32074ce75195e7a53c8c921a53e46a9f9c06d638ae67a2e1fac"} Nov 21 16:10:27 crc kubenswrapper[4967]: I1121 16:10:27.307916 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jdwzg" Nov 21 16:10:27 crc kubenswrapper[4967]: I1121 16:10:27.307950 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jdwzg" event={"ID":"58f9fa94-1160-419c-87f2-0b0a51d6d44c","Type":"ContainerDied","Data":"7cab7bbc37cdb13c6576dca1ceb2a196ba58a64a66dad85868ce7a33c23e2157"} Nov 21 16:10:27 crc kubenswrapper[4967]: I1121 16:10:27.307975 4967 scope.go:117] "RemoveContainer" containerID="1617496a8500f32074ce75195e7a53c8c921a53e46a9f9c06d638ae67a2e1fac" Nov 21 16:10:27 crc kubenswrapper[4967]: I1121 16:10:27.347680 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9" podStartSLOduration=1.885993242 podStartE2EDuration="2.347656087s" podCreationTimestamp="2025-11-21 16:10:25 +0000 UTC" firstStartedPulling="2025-11-21 16:10:26.207543199 +0000 UTC m=+2114.466064207" lastFinishedPulling="2025-11-21 16:10:26.669206044 +0000 UTC m=+2114.927727052" observedRunningTime="2025-11-21 16:10:27.335705473 +0000 UTC m=+2115.594226481" watchObservedRunningTime="2025-11-21 16:10:27.347656087 +0000 UTC m=+2115.606177095" Nov 21 16:10:27 crc kubenswrapper[4967]: I1121 16:10:27.359920 4967 scope.go:117] "RemoveContainer" containerID="edf1521b971d9f51c6880a9ed75aa81f9f09ee3025d5e85771086d09efc4b23b" Nov 21 16:10:27 crc kubenswrapper[4967]: I1121 16:10:27.360413 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jdwzg"] Nov 21 16:10:27 crc kubenswrapper[4967]: I1121 16:10:27.373865 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jdwzg"] Nov 21 16:10:27 crc kubenswrapper[4967]: I1121 16:10:27.379710 4967 scope.go:117] "RemoveContainer" 
containerID="b70416f54aa9ca027383e69c58f9e9d21f938e437548d715a358f5d4f338bbe2" Nov 21 16:10:27 crc kubenswrapper[4967]: I1121 16:10:27.403032 4967 scope.go:117] "RemoveContainer" containerID="1617496a8500f32074ce75195e7a53c8c921a53e46a9f9c06d638ae67a2e1fac" Nov 21 16:10:27 crc kubenswrapper[4967]: E1121 16:10:27.403518 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1617496a8500f32074ce75195e7a53c8c921a53e46a9f9c06d638ae67a2e1fac\": container with ID starting with 1617496a8500f32074ce75195e7a53c8c921a53e46a9f9c06d638ae67a2e1fac not found: ID does not exist" containerID="1617496a8500f32074ce75195e7a53c8c921a53e46a9f9c06d638ae67a2e1fac" Nov 21 16:10:27 crc kubenswrapper[4967]: I1121 16:10:27.403576 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1617496a8500f32074ce75195e7a53c8c921a53e46a9f9c06d638ae67a2e1fac"} err="failed to get container status \"1617496a8500f32074ce75195e7a53c8c921a53e46a9f9c06d638ae67a2e1fac\": rpc error: code = NotFound desc = could not find container \"1617496a8500f32074ce75195e7a53c8c921a53e46a9f9c06d638ae67a2e1fac\": container with ID starting with 1617496a8500f32074ce75195e7a53c8c921a53e46a9f9c06d638ae67a2e1fac not found: ID does not exist" Nov 21 16:10:27 crc kubenswrapper[4967]: I1121 16:10:27.403612 4967 scope.go:117] "RemoveContainer" containerID="edf1521b971d9f51c6880a9ed75aa81f9f09ee3025d5e85771086d09efc4b23b" Nov 21 16:10:27 crc kubenswrapper[4967]: E1121 16:10:27.404020 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"edf1521b971d9f51c6880a9ed75aa81f9f09ee3025d5e85771086d09efc4b23b\": container with ID starting with edf1521b971d9f51c6880a9ed75aa81f9f09ee3025d5e85771086d09efc4b23b not found: ID does not exist" containerID="edf1521b971d9f51c6880a9ed75aa81f9f09ee3025d5e85771086d09efc4b23b" Nov 21 16:10:27 crc kubenswrapper[4967]: I1121 16:10:27.404065 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edf1521b971d9f51c6880a9ed75aa81f9f09ee3025d5e85771086d09efc4b23b"} err="failed to get container status \"edf1521b971d9f51c6880a9ed75aa81f9f09ee3025d5e85771086d09efc4b23b\": rpc error: code = NotFound desc = could not find container \"edf1521b971d9f51c6880a9ed75aa81f9f09ee3025d5e85771086d09efc4b23b\": container with ID starting with edf1521b971d9f51c6880a9ed75aa81f9f09ee3025d5e85771086d09efc4b23b not found: ID does not exist" Nov 21 16:10:27 crc kubenswrapper[4967]: I1121 16:10:27.404091 4967 scope.go:117] "RemoveContainer" containerID="b70416f54aa9ca027383e69c58f9e9d21f938e437548d715a358f5d4f338bbe2" Nov 21 16:10:27 crc kubenswrapper[4967]: E1121 16:10:27.404472 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b70416f54aa9ca027383e69c58f9e9d21f938e437548d715a358f5d4f338bbe2\": container with ID starting with b70416f54aa9ca027383e69c58f9e9d21f938e437548d715a358f5d4f338bbe2 not found: ID does not exist" containerID="b70416f54aa9ca027383e69c58f9e9d21f938e437548d715a358f5d4f338bbe2" Nov 21 16:10:27 crc kubenswrapper[4967]: I1121 16:10:27.404525 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b70416f54aa9ca027383e69c58f9e9d21f938e437548d715a358f5d4f338bbe2"} err="failed to get container status \"b70416f54aa9ca027383e69c58f9e9d21f938e437548d715a358f5d4f338bbe2\": rpc error: code = 
NotFound desc = could not find container \"b70416f54aa9ca027383e69c58f9e9d21f938e437548d715a358f5d4f338bbe2\": container with ID starting with b70416f54aa9ca027383e69c58f9e9d21f938e437548d715a358f5d4f338bbe2 not found: ID does not exist" Nov 21 16:10:28 crc kubenswrapper[4967]: I1121 16:10:28.553698 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58f9fa94-1160-419c-87f2-0b0a51d6d44c" path="/var/lib/kubelet/pods/58f9fa94-1160-419c-87f2-0b0a51d6d44c/volumes" Nov 21 16:10:32 crc kubenswrapper[4967]: I1121 16:10:32.955062 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xbsbm"] Nov 21 16:10:32 crc kubenswrapper[4967]: E1121 16:10:32.956075 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58f9fa94-1160-419c-87f2-0b0a51d6d44c" containerName="extract-utilities" Nov 21 16:10:32 crc kubenswrapper[4967]: I1121 16:10:32.956089 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="58f9fa94-1160-419c-87f2-0b0a51d6d44c" containerName="extract-utilities" Nov 21 16:10:32 crc kubenswrapper[4967]: E1121 16:10:32.956115 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58f9fa94-1160-419c-87f2-0b0a51d6d44c" containerName="registry-server" Nov 21 16:10:32 crc kubenswrapper[4967]: I1121 16:10:32.956123 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="58f9fa94-1160-419c-87f2-0b0a51d6d44c" containerName="registry-server" Nov 21 16:10:32 crc kubenswrapper[4967]: E1121 16:10:32.956155 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58f9fa94-1160-419c-87f2-0b0a51d6d44c" containerName="extract-content" Nov 21 16:10:32 crc kubenswrapper[4967]: I1121 16:10:32.956161 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="58f9fa94-1160-419c-87f2-0b0a51d6d44c" containerName="extract-content" Nov 21 16:10:32 crc kubenswrapper[4967]: I1121 16:10:32.956764 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="58f9fa94-1160-419c-87f2-0b0a51d6d44c" containerName="registry-server" Nov 21 16:10:32 crc kubenswrapper[4967]: I1121 16:10:32.958502 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xbsbm" Nov 21 16:10:32 crc kubenswrapper[4967]: I1121 16:10:32.970911 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xbsbm"] Nov 21 16:10:33 crc kubenswrapper[4967]: I1121 16:10:33.007230 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c316792-8144-44cf-81fb-e679fb21e5c8-utilities\") pod \"community-operators-xbsbm\" (UID: \"2c316792-8144-44cf-81fb-e679fb21e5c8\") " pod="openshift-marketplace/community-operators-xbsbm" Nov 21 16:10:33 crc kubenswrapper[4967]: I1121 16:10:33.007348 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrvrs\" (UniqueName: \"kubernetes.io/projected/2c316792-8144-44cf-81fb-e679fb21e5c8-kube-api-access-jrvrs\") pod \"community-operators-xbsbm\" (UID: \"2c316792-8144-44cf-81fb-e679fb21e5c8\") " pod="openshift-marketplace/community-operators-xbsbm" Nov 21 16:10:33 crc kubenswrapper[4967]: I1121 16:10:33.007445 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c316792-8144-44cf-81fb-e679fb21e5c8-catalog-content\") pod \"community-operators-xbsbm\" (UID: \"2c316792-8144-44cf-81fb-e679fb21e5c8\") " pod="openshift-marketplace/community-operators-xbsbm" Nov 21 16:10:33 crc kubenswrapper[4967]: I1121 16:10:33.109682 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c316792-8144-44cf-81fb-e679fb21e5c8-utilities\") pod \"community-operators-xbsbm\" (UID: \"2c316792-8144-44cf-81fb-e679fb21e5c8\") " pod="openshift-marketplace/community-operators-xbsbm" Nov 21 16:10:33 crc kubenswrapper[4967]: I1121 16:10:33.109796 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrvrs\" (UniqueName: \"kubernetes.io/projected/2c316792-8144-44cf-81fb-e679fb21e5c8-kube-api-access-jrvrs\") pod \"community-operators-xbsbm\" (UID: \"2c316792-8144-44cf-81fb-e679fb21e5c8\") " pod="openshift-marketplace/community-operators-xbsbm" Nov 21 16:10:33 crc kubenswrapper[4967]: I1121 16:10:33.109867 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c316792-8144-44cf-81fb-e679fb21e5c8-catalog-content\") pod \"community-operators-xbsbm\" (UID: \"2c316792-8144-44cf-81fb-e679fb21e5c8\") " pod="openshift-marketplace/community-operators-xbsbm" Nov 21 16:10:33 crc kubenswrapper[4967]: I1121 16:10:33.110461 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c316792-8144-44cf-81fb-e679fb21e5c8-utilities\") pod \"community-operators-xbsbm\" (UID: \"2c316792-8144-44cf-81fb-e679fb21e5c8\") " pod="openshift-marketplace/community-operators-xbsbm" Nov 21 16:10:33 crc kubenswrapper[4967]: I1121 16:10:33.110504 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c316792-8144-44cf-81fb-e679fb21e5c8-catalog-content\") pod \"community-operators-xbsbm\" (UID: \"2c316792-8144-44cf-81fb-e679fb21e5c8\") " pod="openshift-marketplace/community-operators-xbsbm" Nov 21 16:10:33 crc kubenswrapper[4967]: I1121 16:10:33.139694 4967 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-jrvrs\" (UniqueName: \"kubernetes.io/projected/2c316792-8144-44cf-81fb-e679fb21e5c8-kube-api-access-jrvrs\") pod \"community-operators-xbsbm\" (UID: \"2c316792-8144-44cf-81fb-e679fb21e5c8\") " pod="openshift-marketplace/community-operators-xbsbm" Nov 21 16:10:33 crc kubenswrapper[4967]: I1121 16:10:33.283619 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xbsbm" Nov 21 16:10:33 crc kubenswrapper[4967]: I1121 16:10:33.928875 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xbsbm"] Nov 21 16:10:34 crc kubenswrapper[4967]: I1121 16:10:34.387250 4967 generic.go:334] "Generic (PLEG): container finished" podID="2c316792-8144-44cf-81fb-e679fb21e5c8" containerID="4e4210a3e77224cd05854328c5e4290074c30ee725917298a643420292221784" exitCode=0 Nov 21 16:10:34 crc kubenswrapper[4967]: I1121 16:10:34.387401 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbsbm" event={"ID":"2c316792-8144-44cf-81fb-e679fb21e5c8","Type":"ContainerDied","Data":"4e4210a3e77224cd05854328c5e4290074c30ee725917298a643420292221784"} Nov 21 16:10:34 crc kubenswrapper[4967]: I1121 16:10:34.387636 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbsbm" event={"ID":"2c316792-8144-44cf-81fb-e679fb21e5c8","Type":"ContainerStarted","Data":"3fa36ee1615004e34dbf68d8b2f048a87d35f1a66460ec085b5c78eb680e6752"} Nov 21 16:10:35 crc kubenswrapper[4967]: I1121 16:10:35.155205 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8hps4"] Nov 21 16:10:35 crc kubenswrapper[4967]: I1121 16:10:35.158060 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8hps4" Nov 21 16:10:35 crc kubenswrapper[4967]: I1121 16:10:35.178770 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8hps4"] Nov 21 16:10:35 crc kubenswrapper[4967]: I1121 16:10:35.270132 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tch5j\" (UniqueName: \"kubernetes.io/projected/5bbab54e-b429-427f-b8df-19a6d056dbb5-kube-api-access-tch5j\") pod \"redhat-operators-8hps4\" (UID: \"5bbab54e-b429-427f-b8df-19a6d056dbb5\") " pod="openshift-marketplace/redhat-operators-8hps4" Nov 21 16:10:35 crc kubenswrapper[4967]: I1121 16:10:35.270187 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5bbab54e-b429-427f-b8df-19a6d056dbb5-utilities\") pod \"redhat-operators-8hps4\" (UID: \"5bbab54e-b429-427f-b8df-19a6d056dbb5\") " pod="openshift-marketplace/redhat-operators-8hps4" Nov 21 16:10:35 crc kubenswrapper[4967]: I1121 16:10:35.270291 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5bbab54e-b429-427f-b8df-19a6d056dbb5-catalog-content\") pod \"redhat-operators-8hps4\" (UID: \"5bbab54e-b429-427f-b8df-19a6d056dbb5\") " pod="openshift-marketplace/redhat-operators-8hps4" Nov 21 16:10:35 crc kubenswrapper[4967]: I1121 16:10:35.372182 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tch5j\" (UniqueName: \"kubernetes.io/projected/5bbab54e-b429-427f-b8df-19a6d056dbb5-kube-api-access-tch5j\") pod \"redhat-operators-8hps4\" (UID: \"5bbab54e-b429-427f-b8df-19a6d056dbb5\") " pod="openshift-marketplace/redhat-operators-8hps4" Nov 21 16:10:35 crc kubenswrapper[4967]: I1121 16:10:35.372251 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5bbab54e-b429-427f-b8df-19a6d056dbb5-utilities\") pod \"redhat-operators-8hps4\" (UID: \"5bbab54e-b429-427f-b8df-19a6d056dbb5\") " pod="openshift-marketplace/redhat-operators-8hps4" Nov 21 16:10:35 crc kubenswrapper[4967]: I1121 16:10:35.372407 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5bbab54e-b429-427f-b8df-19a6d056dbb5-catalog-content\") pod \"redhat-operators-8hps4\" (UID: \"5bbab54e-b429-427f-b8df-19a6d056dbb5\") " pod="openshift-marketplace/redhat-operators-8hps4" Nov 21 16:10:35 crc kubenswrapper[4967]: I1121 16:10:35.372884 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5bbab54e-b429-427f-b8df-19a6d056dbb5-utilities\") pod \"redhat-operators-8hps4\" (UID: \"5bbab54e-b429-427f-b8df-19a6d056dbb5\") " pod="openshift-marketplace/redhat-operators-8hps4" Nov 21 16:10:35 crc kubenswrapper[4967]: I1121 16:10:35.373009 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5bbab54e-b429-427f-b8df-19a6d056dbb5-catalog-content\") pod \"redhat-operators-8hps4\" (UID: \"5bbab54e-b429-427f-b8df-19a6d056dbb5\") " pod="openshift-marketplace/redhat-operators-8hps4" Nov 21 16:10:35 crc kubenswrapper[4967]: I1121 16:10:35.400928 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-xbsbm" event={"ID":"2c316792-8144-44cf-81fb-e679fb21e5c8","Type":"ContainerStarted","Data":"c399d482170dba274282db2ea3ad684a998a71b68c27db7c09a6312a00708b4d"} Nov 21 16:10:35 crc kubenswrapper[4967]: I1121 16:10:35.405431 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tch5j\" (UniqueName: \"kubernetes.io/projected/5bbab54e-b429-427f-b8df-19a6d056dbb5-kube-api-access-tch5j\") pod \"redhat-operators-8hps4\" (UID: \"5bbab54e-b429-427f-b8df-19a6d056dbb5\") " pod="openshift-marketplace/redhat-operators-8hps4" Nov 21 16:10:35 crc kubenswrapper[4967]: I1121 16:10:35.476587 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8hps4" Nov 21 16:10:36 crc kubenswrapper[4967]: I1121 16:10:36.024490 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8hps4"] Nov 21 16:10:36 crc kubenswrapper[4967]: I1121 16:10:36.156473 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rzwb9"] Nov 21 16:10:36 crc kubenswrapper[4967]: I1121 16:10:36.159086 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rzwb9" Nov 21 16:10:36 crc kubenswrapper[4967]: I1121 16:10:36.180598 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rzwb9"] Nov 21 16:10:36 crc kubenswrapper[4967]: I1121 16:10:36.193815 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btnp7\" (UniqueName: \"kubernetes.io/projected/3a928dd6-09f1-42f8-bd56-5e7aec2065ce-kube-api-access-btnp7\") pod \"certified-operators-rzwb9\" (UID: \"3a928dd6-09f1-42f8-bd56-5e7aec2065ce\") " pod="openshift-marketplace/certified-operators-rzwb9" Nov 21 16:10:36 crc kubenswrapper[4967]: I1121 16:10:36.193944 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a928dd6-09f1-42f8-bd56-5e7aec2065ce-utilities\") pod \"certified-operators-rzwb9\" (UID: \"3a928dd6-09f1-42f8-bd56-5e7aec2065ce\") " pod="openshift-marketplace/certified-operators-rzwb9" Nov 21 16:10:36 crc kubenswrapper[4967]: I1121 16:10:36.194199 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a928dd6-09f1-42f8-bd56-5e7aec2065ce-catalog-content\") pod \"certified-operators-rzwb9\" (UID: \"3a928dd6-09f1-42f8-bd56-5e7aec2065ce\") " pod="openshift-marketplace/certified-operators-rzwb9" Nov 21 16:10:36 crc kubenswrapper[4967]: I1121 16:10:36.296388 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a928dd6-09f1-42f8-bd56-5e7aec2065ce-catalog-content\") pod \"certified-operators-rzwb9\" (UID: \"3a928dd6-09f1-42f8-bd56-5e7aec2065ce\") " pod="openshift-marketplace/certified-operators-rzwb9" Nov 21 16:10:36 crc kubenswrapper[4967]: I1121 16:10:36.296768 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btnp7\" (UniqueName: \"kubernetes.io/projected/3a928dd6-09f1-42f8-bd56-5e7aec2065ce-kube-api-access-btnp7\") pod \"certified-operators-rzwb9\" (UID: \"3a928dd6-09f1-42f8-bd56-5e7aec2065ce\") " pod="openshift-marketplace/certified-operators-rzwb9" Nov 
21 16:10:36 crc kubenswrapper[4967]: I1121 16:10:36.296825 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a928dd6-09f1-42f8-bd56-5e7aec2065ce-utilities\") pod \"certified-operators-rzwb9\" (UID: \"3a928dd6-09f1-42f8-bd56-5e7aec2065ce\") " pod="openshift-marketplace/certified-operators-rzwb9" Nov 21 16:10:36 crc kubenswrapper[4967]: I1121 16:10:36.297080 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a928dd6-09f1-42f8-bd56-5e7aec2065ce-catalog-content\") pod \"certified-operators-rzwb9\" (UID: \"3a928dd6-09f1-42f8-bd56-5e7aec2065ce\") " pod="openshift-marketplace/certified-operators-rzwb9" Nov 21 16:10:36 crc kubenswrapper[4967]: I1121 16:10:36.297100 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a928dd6-09f1-42f8-bd56-5e7aec2065ce-utilities\") pod \"certified-operators-rzwb9\" (UID: \"3a928dd6-09f1-42f8-bd56-5e7aec2065ce\") " pod="openshift-marketplace/certified-operators-rzwb9" Nov 21 16:10:36 crc kubenswrapper[4967]: I1121 16:10:36.324528 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btnp7\" (UniqueName: \"kubernetes.io/projected/3a928dd6-09f1-42f8-bd56-5e7aec2065ce-kube-api-access-btnp7\") pod \"certified-operators-rzwb9\" (UID: \"3a928dd6-09f1-42f8-bd56-5e7aec2065ce\") " pod="openshift-marketplace/certified-operators-rzwb9" Nov 21 16:10:36 crc kubenswrapper[4967]: I1121 16:10:36.426019 4967 generic.go:334] "Generic (PLEG): container finished" podID="5bbab54e-b429-427f-b8df-19a6d056dbb5" containerID="34413045a4a2b5114bc921b1f31bfe9771e2d80b0fdf752308a8b25b873f28e5" exitCode=0 Nov 21 16:10:36 crc kubenswrapper[4967]: I1121 16:10:36.427218 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8hps4" event={"ID":"5bbab54e-b429-427f-b8df-19a6d056dbb5","Type":"ContainerDied","Data":"34413045a4a2b5114bc921b1f31bfe9771e2d80b0fdf752308a8b25b873f28e5"} Nov 21 16:10:36 crc kubenswrapper[4967]: I1121 16:10:36.427259 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8hps4" event={"ID":"5bbab54e-b429-427f-b8df-19a6d056dbb5","Type":"ContainerStarted","Data":"f2372047a463578827535f59a8c5ec3ad17f7288887ee4eb850738848f05e6db"} Nov 21 16:10:36 crc kubenswrapper[4967]: I1121 16:10:36.485933 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rzwb9" Nov 21 16:10:37 crc kubenswrapper[4967]: I1121 16:10:37.208911 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rzwb9"] Nov 21 16:10:37 crc kubenswrapper[4967]: I1121 16:10:37.438135 4967 generic.go:334] "Generic (PLEG): container finished" podID="2c316792-8144-44cf-81fb-e679fb21e5c8" containerID="c399d482170dba274282db2ea3ad684a998a71b68c27db7c09a6312a00708b4d" exitCode=0 Nov 21 16:10:37 crc kubenswrapper[4967]: I1121 16:10:37.438225 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbsbm" event={"ID":"2c316792-8144-44cf-81fb-e679fb21e5c8","Type":"ContainerDied","Data":"c399d482170dba274282db2ea3ad684a998a71b68c27db7c09a6312a00708b4d"} Nov 21 16:10:37 crc kubenswrapper[4967]: I1121 16:10:37.441157 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rzwb9" event={"ID":"3a928dd6-09f1-42f8-bd56-5e7aec2065ce","Type":"ContainerStarted","Data":"98cd4bb961e41bc70804ca95e02ae3b99a6136e885b1c7f9646ecc81c74109bc"} Nov 21 16:10:37 crc kubenswrapper[4967]: I1121 16:10:37.441351 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rzwb9" event={"ID":"3a928dd6-09f1-42f8-bd56-5e7aec2065ce","Type":"ContainerStarted","Data":"c5c5643fbc1552738e0aea3cbf4bf24b9845fda2a3546f390322be18ad1c33e1"} Nov 21 16:10:37 crc kubenswrapper[4967]: I1121 16:10:37.443438 4967 generic.go:334] "Generic (PLEG): container finished" podID="6559f1f0-c99e-49ba-8108-d57a8bf60d33" containerID="6c6345601c4c87d09bba76a29611ecbc762762e7a676259da6a92e0171ea342b" exitCode=0 Nov 21 16:10:37 crc kubenswrapper[4967]: I1121 16:10:37.443496 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9" event={"ID":"6559f1f0-c99e-49ba-8108-d57a8bf60d33","Type":"ContainerDied","Data":"6c6345601c4c87d09bba76a29611ecbc762762e7a676259da6a92e0171ea342b"} Nov 21 16:10:37 crc kubenswrapper[4967]: I1121 16:10:37.448303 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8hps4" event={"ID":"5bbab54e-b429-427f-b8df-19a6d056dbb5","Type":"ContainerStarted","Data":"28ad619e9e5f0e9838c55ba786dff786798cfb1da8b43386be8efabe7c0ca85b"} Nov 21 16:10:38 crc kubenswrapper[4967]: I1121 16:10:38.462655 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbsbm" event={"ID":"2c316792-8144-44cf-81fb-e679fb21e5c8","Type":"ContainerStarted","Data":"a601a81c87b21ca9fe450acd2a279da637bf8eced756f5d125ba6a4aefe6090a"} Nov 21 16:10:38 crc kubenswrapper[4967]: I1121 16:10:38.466324 4967 generic.go:334] "Generic (PLEG): container finished" podID="3a928dd6-09f1-42f8-bd56-5e7aec2065ce" containerID="98cd4bb961e41bc70804ca95e02ae3b99a6136e885b1c7f9646ecc81c74109bc" exitCode=0 Nov 21 16:10:38 crc kubenswrapper[4967]: I1121 16:10:38.466436 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rzwb9" event={"ID":"3a928dd6-09f1-42f8-bd56-5e7aec2065ce","Type":"ContainerDied","Data":"98cd4bb961e41bc70804ca95e02ae3b99a6136e885b1c7f9646ecc81c74109bc"} Nov 21 16:10:38 crc kubenswrapper[4967]: I1121 16:10:38.466461 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rzwb9" 
event={"ID":"3a928dd6-09f1-42f8-bd56-5e7aec2065ce","Type":"ContainerStarted","Data":"11fc809f277ed921a6f600a367bfbbcba17aac039870695687369ff5b09f02ef"} Nov 21 16:10:38 crc kubenswrapper[4967]: I1121 16:10:38.493149 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xbsbm" podStartSLOduration=3.042988764 podStartE2EDuration="6.493127946s" podCreationTimestamp="2025-11-21 16:10:32 +0000 UTC" firstStartedPulling="2025-11-21 16:10:34.389062618 +0000 UTC m=+2122.647583616" lastFinishedPulling="2025-11-21 16:10:37.83920178 +0000 UTC m=+2126.097722798" observedRunningTime="2025-11-21 16:10:38.486392292 +0000 UTC m=+2126.744913300" watchObservedRunningTime="2025-11-21 16:10:38.493127946 +0000 UTC m=+2126.751648954" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.024482 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.179973 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rmk2t\" (UniqueName: \"kubernetes.io/projected/6559f1f0-c99e-49ba-8108-d57a8bf60d33-kube-api-access-rmk2t\") pod \"6559f1f0-c99e-49ba-8108-d57a8bf60d33\" (UID: \"6559f1f0-c99e-49ba-8108-d57a8bf60d33\") " Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.180663 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6559f1f0-c99e-49ba-8108-d57a8bf60d33-inventory\") pod \"6559f1f0-c99e-49ba-8108-d57a8bf60d33\" (UID: \"6559f1f0-c99e-49ba-8108-d57a8bf60d33\") " Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.180728 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6559f1f0-c99e-49ba-8108-d57a8bf60d33-ssh-key\") pod \"6559f1f0-c99e-49ba-8108-d57a8bf60d33\" (UID: \"6559f1f0-c99e-49ba-8108-d57a8bf60d33\") " Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.191541 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6559f1f0-c99e-49ba-8108-d57a8bf60d33-kube-api-access-rmk2t" (OuterVolumeSpecName: "kube-api-access-rmk2t") pod "6559f1f0-c99e-49ba-8108-d57a8bf60d33" (UID: "6559f1f0-c99e-49ba-8108-d57a8bf60d33"). InnerVolumeSpecName "kube-api-access-rmk2t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.227905 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6559f1f0-c99e-49ba-8108-d57a8bf60d33-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6559f1f0-c99e-49ba-8108-d57a8bf60d33" (UID: "6559f1f0-c99e-49ba-8108-d57a8bf60d33"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.228064 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6559f1f0-c99e-49ba-8108-d57a8bf60d33-inventory" (OuterVolumeSpecName: "inventory") pod "6559f1f0-c99e-49ba-8108-d57a8bf60d33" (UID: "6559f1f0-c99e-49ba-8108-d57a8bf60d33"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.285671 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rmk2t\" (UniqueName: \"kubernetes.io/projected/6559f1f0-c99e-49ba-8108-d57a8bf60d33-kube-api-access-rmk2t\") on node \"crc\" DevicePath \"\"" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.285737 4967 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6559f1f0-c99e-49ba-8108-d57a8bf60d33-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.285755 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6559f1f0-c99e-49ba-8108-d57a8bf60d33-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.478724 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.478748 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9" event={"ID":"6559f1f0-c99e-49ba-8108-d57a8bf60d33","Type":"ContainerDied","Data":"248bd6fa1620372844434d00c47e32b3e7cf421dff2753045bdaa642a5a3039c"} Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.478790 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="248bd6fa1620372844434d00c47e32b3e7cf421dff2753045bdaa642a5a3039c" Nov 21 16:10:39 crc kubenswrapper[4967]: E1121 16:10:39.557072 4967 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6559f1f0_c99e_49ba_8108_d57a8bf60d33.slice\": RecentStats: unable to find data in memory cache]" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.657774 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b"] Nov 21 16:10:39 crc kubenswrapper[4967]: E1121 16:10:39.658415 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6559f1f0-c99e-49ba-8108-d57a8bf60d33" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.658429 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="6559f1f0-c99e-49ba-8108-d57a8bf60d33" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.658656 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="6559f1f0-c99e-49ba-8108-d57a8bf60d33" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.659481 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.661934 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.662921 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.663024 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.663171 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.663387 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rn5c5" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.663528 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.663643 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.663871 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.664469 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.672634 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b"] Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.802722 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.802792 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.802830 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.802875 4967 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.802900 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.803145 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.803203 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.803606 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.803679 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.803748 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjkx2\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-kube-api-access-xjkx2\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.803771 4967 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.803830 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.803905 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.803980 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.804100 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.804122 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.906249 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.906698 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.906745 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xjkx2\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-kube-api-access-xjkx2\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.906772 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.906825 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.906876 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.906930 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.906999 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.907026 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc 
kubenswrapper[4967]: I1121 16:10:39.907052 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.907082 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.907110 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.907153 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.907180 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.907245 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.907271 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.913461 4967 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.913524 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.914477 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.914895 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.914923 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.917570 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.922579 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.927972 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjkx2\" (UniqueName: 
\"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-kube-api-access-xjkx2\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.957430 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.979167 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.982341 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.982398 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.983100 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.985389 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.985440 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:39 crc kubenswrapper[4967]: I1121 16:10:39.991607 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:40 crc kubenswrapper[4967]: I1121 16:10:40.277685 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:10:40 crc kubenswrapper[4967]: I1121 16:10:40.868292 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b"] Nov 21 16:10:41 crc kubenswrapper[4967]: I1121 16:10:41.508820 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" event={"ID":"4aaa5027-a173-4854-90e9-69635bd3cd76","Type":"ContainerStarted","Data":"b63c26233ed87944f62e42a4a63ce7fa5c3aab839831869396437f0e32d57d90"} Nov 21 16:10:42 crc kubenswrapper[4967]: I1121 16:10:42.521282 4967 generic.go:334] "Generic (PLEG): container finished" podID="3a928dd6-09f1-42f8-bd56-5e7aec2065ce" containerID="11fc809f277ed921a6f600a367bfbbcba17aac039870695687369ff5b09f02ef" exitCode=0 Nov 21 16:10:42 crc kubenswrapper[4967]: I1121 16:10:42.521341 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rzwb9" event={"ID":"3a928dd6-09f1-42f8-bd56-5e7aec2065ce","Type":"ContainerDied","Data":"11fc809f277ed921a6f600a367bfbbcba17aac039870695687369ff5b09f02ef"} Nov 21 16:10:42 crc kubenswrapper[4967]: I1121 16:10:42.523792 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" event={"ID":"4aaa5027-a173-4854-90e9-69635bd3cd76","Type":"ContainerStarted","Data":"9fc2e9c0f0761ae504dd635f4a36f1fd28509fb526143db39b597d6fa238bc77"} Nov 21 16:10:42 crc kubenswrapper[4967]: I1121 16:10:42.569406 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" podStartSLOduration=3.068719423 podStartE2EDuration="3.569367602s" podCreationTimestamp="2025-11-21 16:10:39 +0000 UTC" firstStartedPulling="2025-11-21 16:10:40.872764056 +0000 UTC m=+2129.131285064" lastFinishedPulling="2025-11-21 16:10:41.373412235 +0000 UTC m=+2129.631933243" observedRunningTime="2025-11-21 16:10:42.559970472 +0000 UTC m=+2130.818491480" watchObservedRunningTime="2025-11-21 16:10:42.569367602 +0000 UTC m=+2130.827888610" Nov 21 16:10:43 crc kubenswrapper[4967]: I1121 16:10:43.284108 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xbsbm" Nov 21 16:10:43 crc kubenswrapper[4967]: I1121 16:10:43.284777 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xbsbm" Nov 21 16:10:43 crc kubenswrapper[4967]: I1121 16:10:43.539352 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rzwb9" event={"ID":"3a928dd6-09f1-42f8-bd56-5e7aec2065ce","Type":"ContainerStarted","Data":"5952c91a820691a99349b89e830c8d14205881966b9239486acd71c34ddb48c9"} Nov 21 16:10:43 crc kubenswrapper[4967]: I1121 16:10:43.542058 4967 generic.go:334] "Generic (PLEG): container finished" podID="5bbab54e-b429-427f-b8df-19a6d056dbb5" 
containerID="28ad619e9e5f0e9838c55ba786dff786798cfb1da8b43386be8efabe7c0ca85b" exitCode=0 Nov 21 16:10:43 crc kubenswrapper[4967]: I1121 16:10:43.542169 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8hps4" event={"ID":"5bbab54e-b429-427f-b8df-19a6d056dbb5","Type":"ContainerDied","Data":"28ad619e9e5f0e9838c55ba786dff786798cfb1da8b43386be8efabe7c0ca85b"} Nov 21 16:10:43 crc kubenswrapper[4967]: I1121 16:10:43.562573 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rzwb9" podStartSLOduration=2.048943635 podStartE2EDuration="7.562547215s" podCreationTimestamp="2025-11-21 16:10:36 +0000 UTC" firstStartedPulling="2025-11-21 16:10:37.442878988 +0000 UTC m=+2125.701399996" lastFinishedPulling="2025-11-21 16:10:42.956482568 +0000 UTC m=+2131.215003576" observedRunningTime="2025-11-21 16:10:43.558116387 +0000 UTC m=+2131.816637415" watchObservedRunningTime="2025-11-21 16:10:43.562547215 +0000 UTC m=+2131.821068253" Nov 21 16:10:44 crc kubenswrapper[4967]: I1121 16:10:44.339886 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-xbsbm" podUID="2c316792-8144-44cf-81fb-e679fb21e5c8" containerName="registry-server" probeResult="failure" output=< Nov 21 16:10:44 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 16:10:44 crc kubenswrapper[4967]: > Nov 21 16:10:44 crc kubenswrapper[4967]: I1121 16:10:44.553232 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8hps4" event={"ID":"5bbab54e-b429-427f-b8df-19a6d056dbb5","Type":"ContainerStarted","Data":"eec6ee445ae4506e2e2220f0cf6cdb1e7c76f021822082588e68f9764b684048"} Nov 21 16:10:44 crc kubenswrapper[4967]: I1121 16:10:44.583490 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8hps4" podStartSLOduration=1.820272184 podStartE2EDuration="9.583470878s" podCreationTimestamp="2025-11-21 16:10:35 +0000 UTC" firstStartedPulling="2025-11-21 16:10:36.427810923 +0000 UTC m=+2124.686331931" lastFinishedPulling="2025-11-21 16:10:44.191009617 +0000 UTC m=+2132.449530625" observedRunningTime="2025-11-21 16:10:44.57276739 +0000 UTC m=+2132.831288398" watchObservedRunningTime="2025-11-21 16:10:44.583470878 +0000 UTC m=+2132.841991886" Nov 21 16:10:45 crc kubenswrapper[4967]: I1121 16:10:45.477706 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8hps4" Nov 21 16:10:45 crc kubenswrapper[4967]: I1121 16:10:45.478030 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8hps4" Nov 21 16:10:46 crc kubenswrapper[4967]: I1121 16:10:46.487833 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rzwb9" Nov 21 16:10:46 crc kubenswrapper[4967]: I1121 16:10:46.487917 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rzwb9" Nov 21 16:10:46 crc kubenswrapper[4967]: I1121 16:10:46.521784 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:10:46 crc 
kubenswrapper[4967]: I1121 16:10:46.521856 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:10:46 crc kubenswrapper[4967]: I1121 16:10:46.521917 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 16:10:46 crc kubenswrapper[4967]: I1121 16:10:46.522806 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3a70b103f587ca806a5d8c593187d79e87e580cfd34b5f2ccc6278b03129472e"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 16:10:46 crc kubenswrapper[4967]: I1121 16:10:46.522886 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://3a70b103f587ca806a5d8c593187d79e87e580cfd34b5f2ccc6278b03129472e" gracePeriod=600 Nov 21 16:10:46 crc kubenswrapper[4967]: I1121 16:10:46.527439 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8hps4" podUID="5bbab54e-b429-427f-b8df-19a6d056dbb5" containerName="registry-server" probeResult="failure" output=< Nov 21 16:10:46 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 16:10:46 crc kubenswrapper[4967]: > Nov 21 16:10:46 crc kubenswrapper[4967]: I1121 16:10:46.555657 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rzwb9" Nov 21 16:10:47 crc kubenswrapper[4967]: I1121 16:10:47.589403 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="3a70b103f587ca806a5d8c593187d79e87e580cfd34b5f2ccc6278b03129472e" exitCode=0 Nov 21 16:10:47 crc kubenswrapper[4967]: I1121 16:10:47.589485 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"3a70b103f587ca806a5d8c593187d79e87e580cfd34b5f2ccc6278b03129472e"} Nov 21 16:10:47 crc kubenswrapper[4967]: I1121 16:10:47.589741 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8"} Nov 21 16:10:47 crc kubenswrapper[4967]: I1121 16:10:47.589762 4967 scope.go:117] "RemoveContainer" containerID="14627c4d6fed57db78e31834bdb7a49b27b296c61e1f29d7981a4860dd84380a" Nov 21 16:10:54 crc kubenswrapper[4967]: I1121 16:10:54.337553 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-xbsbm" podUID="2c316792-8144-44cf-81fb-e679fb21e5c8" containerName="registry-server" probeResult="failure" output=< Nov 21 16:10:54 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 16:10:54 crc kubenswrapper[4967]: > Nov 21 
16:10:55 crc kubenswrapper[4967]: I1121 16:10:55.525451 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8hps4" Nov 21 16:10:55 crc kubenswrapper[4967]: I1121 16:10:55.588528 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8hps4" Nov 21 16:10:55 crc kubenswrapper[4967]: I1121 16:10:55.761471 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8hps4"] Nov 21 16:10:56 crc kubenswrapper[4967]: I1121 16:10:56.550564 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rzwb9" Nov 21 16:10:56 crc kubenswrapper[4967]: I1121 16:10:56.681728 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8hps4" podUID="5bbab54e-b429-427f-b8df-19a6d056dbb5" containerName="registry-server" containerID="cri-o://eec6ee445ae4506e2e2220f0cf6cdb1e7c76f021822082588e68f9764b684048" gracePeriod=2 Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.351338 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8hps4" Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.425612 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5bbab54e-b429-427f-b8df-19a6d056dbb5-catalog-content\") pod \"5bbab54e-b429-427f-b8df-19a6d056dbb5\" (UID: \"5bbab54e-b429-427f-b8df-19a6d056dbb5\") " Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.425742 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tch5j\" (UniqueName: \"kubernetes.io/projected/5bbab54e-b429-427f-b8df-19a6d056dbb5-kube-api-access-tch5j\") pod \"5bbab54e-b429-427f-b8df-19a6d056dbb5\" (UID: \"5bbab54e-b429-427f-b8df-19a6d056dbb5\") " Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.425878 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5bbab54e-b429-427f-b8df-19a6d056dbb5-utilities\") pod \"5bbab54e-b429-427f-b8df-19a6d056dbb5\" (UID: \"5bbab54e-b429-427f-b8df-19a6d056dbb5\") " Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.426761 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5bbab54e-b429-427f-b8df-19a6d056dbb5-utilities" (OuterVolumeSpecName: "utilities") pod "5bbab54e-b429-427f-b8df-19a6d056dbb5" (UID: "5bbab54e-b429-427f-b8df-19a6d056dbb5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.433623 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5bbab54e-b429-427f-b8df-19a6d056dbb5-kube-api-access-tch5j" (OuterVolumeSpecName: "kube-api-access-tch5j") pod "5bbab54e-b429-427f-b8df-19a6d056dbb5" (UID: "5bbab54e-b429-427f-b8df-19a6d056dbb5"). InnerVolumeSpecName "kube-api-access-tch5j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.528117 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5bbab54e-b429-427f-b8df-19a6d056dbb5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5bbab54e-b429-427f-b8df-19a6d056dbb5" (UID: "5bbab54e-b429-427f-b8df-19a6d056dbb5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.528854 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5bbab54e-b429-427f-b8df-19a6d056dbb5-catalog-content\") pod \"5bbab54e-b429-427f-b8df-19a6d056dbb5\" (UID: \"5bbab54e-b429-427f-b8df-19a6d056dbb5\") " Nov 21 16:10:57 crc kubenswrapper[4967]: W1121 16:10:57.529022 4967 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/5bbab54e-b429-427f-b8df-19a6d056dbb5/volumes/kubernetes.io~empty-dir/catalog-content Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.529054 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5bbab54e-b429-427f-b8df-19a6d056dbb5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5bbab54e-b429-427f-b8df-19a6d056dbb5" (UID: "5bbab54e-b429-427f-b8df-19a6d056dbb5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.529731 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5bbab54e-b429-427f-b8df-19a6d056dbb5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.529758 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tch5j\" (UniqueName: \"kubernetes.io/projected/5bbab54e-b429-427f-b8df-19a6d056dbb5-kube-api-access-tch5j\") on node \"crc\" DevicePath \"\"" Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.529772 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5bbab54e-b429-427f-b8df-19a6d056dbb5-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.694259 4967 generic.go:334] "Generic (PLEG): container finished" podID="5bbab54e-b429-427f-b8df-19a6d056dbb5" containerID="eec6ee445ae4506e2e2220f0cf6cdb1e7c76f021822082588e68f9764b684048" exitCode=0 Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.694304 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8hps4" event={"ID":"5bbab54e-b429-427f-b8df-19a6d056dbb5","Type":"ContainerDied","Data":"eec6ee445ae4506e2e2220f0cf6cdb1e7c76f021822082588e68f9764b684048"} Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.694365 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8hps4" event={"ID":"5bbab54e-b429-427f-b8df-19a6d056dbb5","Type":"ContainerDied","Data":"f2372047a463578827535f59a8c5ec3ad17f7288887ee4eb850738848f05e6db"} Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.694383 4967 scope.go:117] "RemoveContainer" containerID="eec6ee445ae4506e2e2220f0cf6cdb1e7c76f021822082588e68f9764b684048" Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.694400 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8hps4" Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.719134 4967 scope.go:117] "RemoveContainer" containerID="28ad619e9e5f0e9838c55ba786dff786798cfb1da8b43386be8efabe7c0ca85b" Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.725816 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8hps4"] Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.735031 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8hps4"] Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.765902 4967 scope.go:117] "RemoveContainer" containerID="34413045a4a2b5114bc921b1f31bfe9771e2d80b0fdf752308a8b25b873f28e5" Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.797795 4967 scope.go:117] "RemoveContainer" containerID="eec6ee445ae4506e2e2220f0cf6cdb1e7c76f021822082588e68f9764b684048" Nov 21 16:10:57 crc kubenswrapper[4967]: E1121 16:10:57.798301 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eec6ee445ae4506e2e2220f0cf6cdb1e7c76f021822082588e68f9764b684048\": container with ID starting with eec6ee445ae4506e2e2220f0cf6cdb1e7c76f021822082588e68f9764b684048 not found: ID does not exist" containerID="eec6ee445ae4506e2e2220f0cf6cdb1e7c76f021822082588e68f9764b684048" Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.798362 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eec6ee445ae4506e2e2220f0cf6cdb1e7c76f021822082588e68f9764b684048"} err="failed to get container status \"eec6ee445ae4506e2e2220f0cf6cdb1e7c76f021822082588e68f9764b684048\": rpc error: code = NotFound desc = could not find container \"eec6ee445ae4506e2e2220f0cf6cdb1e7c76f021822082588e68f9764b684048\": container with ID starting with eec6ee445ae4506e2e2220f0cf6cdb1e7c76f021822082588e68f9764b684048 not found: ID does not exist" Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.798395 4967 scope.go:117] "RemoveContainer" containerID="28ad619e9e5f0e9838c55ba786dff786798cfb1da8b43386be8efabe7c0ca85b" Nov 21 16:10:57 crc kubenswrapper[4967]: E1121 16:10:57.798773 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28ad619e9e5f0e9838c55ba786dff786798cfb1da8b43386be8efabe7c0ca85b\": container with ID starting with 28ad619e9e5f0e9838c55ba786dff786798cfb1da8b43386be8efabe7c0ca85b not found: ID does not exist" containerID="28ad619e9e5f0e9838c55ba786dff786798cfb1da8b43386be8efabe7c0ca85b" Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.798889 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28ad619e9e5f0e9838c55ba786dff786798cfb1da8b43386be8efabe7c0ca85b"} err="failed to get container status \"28ad619e9e5f0e9838c55ba786dff786798cfb1da8b43386be8efabe7c0ca85b\": rpc error: code = NotFound desc = could not find container \"28ad619e9e5f0e9838c55ba786dff786798cfb1da8b43386be8efabe7c0ca85b\": container with ID starting with 28ad619e9e5f0e9838c55ba786dff786798cfb1da8b43386be8efabe7c0ca85b not found: ID does not exist" Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.798997 4967 scope.go:117] "RemoveContainer" containerID="34413045a4a2b5114bc921b1f31bfe9771e2d80b0fdf752308a8b25b873f28e5" Nov 21 16:10:57 crc kubenswrapper[4967]: E1121 16:10:57.799417 4967 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"34413045a4a2b5114bc921b1f31bfe9771e2d80b0fdf752308a8b25b873f28e5\": container with ID starting with 34413045a4a2b5114bc921b1f31bfe9771e2d80b0fdf752308a8b25b873f28e5 not found: ID does not exist" containerID="34413045a4a2b5114bc921b1f31bfe9771e2d80b0fdf752308a8b25b873f28e5" Nov 21 16:10:57 crc kubenswrapper[4967]: I1121 16:10:57.799477 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34413045a4a2b5114bc921b1f31bfe9771e2d80b0fdf752308a8b25b873f28e5"} err="failed to get container status \"34413045a4a2b5114bc921b1f31bfe9771e2d80b0fdf752308a8b25b873f28e5\": rpc error: code = NotFound desc = could not find container \"34413045a4a2b5114bc921b1f31bfe9771e2d80b0fdf752308a8b25b873f28e5\": container with ID starting with 34413045a4a2b5114bc921b1f31bfe9771e2d80b0fdf752308a8b25b873f28e5 not found: ID does not exist" Nov 21 16:10:58 crc kubenswrapper[4967]: I1121 16:10:58.552436 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5bbab54e-b429-427f-b8df-19a6d056dbb5" path="/var/lib/kubelet/pods/5bbab54e-b429-427f-b8df-19a6d056dbb5/volumes" Nov 21 16:10:58 crc kubenswrapper[4967]: I1121 16:10:58.763945 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rzwb9"] Nov 21 16:10:58 crc kubenswrapper[4967]: I1121 16:10:58.764270 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-rzwb9" podUID="3a928dd6-09f1-42f8-bd56-5e7aec2065ce" containerName="registry-server" containerID="cri-o://5952c91a820691a99349b89e830c8d14205881966b9239486acd71c34ddb48c9" gracePeriod=2 Nov 21 16:10:59 crc kubenswrapper[4967]: I1121 16:10:59.720035 4967 generic.go:334] "Generic (PLEG): container finished" podID="3a928dd6-09f1-42f8-bd56-5e7aec2065ce" containerID="5952c91a820691a99349b89e830c8d14205881966b9239486acd71c34ddb48c9" exitCode=0 Nov 21 16:10:59 crc kubenswrapper[4967]: I1121 16:10:59.720075 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rzwb9" event={"ID":"3a928dd6-09f1-42f8-bd56-5e7aec2065ce","Type":"ContainerDied","Data":"5952c91a820691a99349b89e830c8d14205881966b9239486acd71c34ddb48c9"} Nov 21 16:11:00 crc kubenswrapper[4967]: I1121 16:11:00.085847 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rzwb9" Nov 21 16:11:00 crc kubenswrapper[4967]: I1121 16:11:00.240793 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a928dd6-09f1-42f8-bd56-5e7aec2065ce-catalog-content\") pod \"3a928dd6-09f1-42f8-bd56-5e7aec2065ce\" (UID: \"3a928dd6-09f1-42f8-bd56-5e7aec2065ce\") " Nov 21 16:11:00 crc kubenswrapper[4967]: I1121 16:11:00.240911 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a928dd6-09f1-42f8-bd56-5e7aec2065ce-utilities\") pod \"3a928dd6-09f1-42f8-bd56-5e7aec2065ce\" (UID: \"3a928dd6-09f1-42f8-bd56-5e7aec2065ce\") " Nov 21 16:11:00 crc kubenswrapper[4967]: I1121 16:11:00.241025 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-btnp7\" (UniqueName: \"kubernetes.io/projected/3a928dd6-09f1-42f8-bd56-5e7aec2065ce-kube-api-access-btnp7\") pod \"3a928dd6-09f1-42f8-bd56-5e7aec2065ce\" (UID: \"3a928dd6-09f1-42f8-bd56-5e7aec2065ce\") " Nov 21 16:11:00 crc kubenswrapper[4967]: I1121 16:11:00.242407 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a928dd6-09f1-42f8-bd56-5e7aec2065ce-utilities" (OuterVolumeSpecName: "utilities") pod "3a928dd6-09f1-42f8-bd56-5e7aec2065ce" (UID: "3a928dd6-09f1-42f8-bd56-5e7aec2065ce"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:11:00 crc kubenswrapper[4967]: I1121 16:11:00.246732 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a928dd6-09f1-42f8-bd56-5e7aec2065ce-kube-api-access-btnp7" (OuterVolumeSpecName: "kube-api-access-btnp7") pod "3a928dd6-09f1-42f8-bd56-5e7aec2065ce" (UID: "3a928dd6-09f1-42f8-bd56-5e7aec2065ce"). InnerVolumeSpecName "kube-api-access-btnp7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:11:00 crc kubenswrapper[4967]: I1121 16:11:00.283709 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a928dd6-09f1-42f8-bd56-5e7aec2065ce-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3a928dd6-09f1-42f8-bd56-5e7aec2065ce" (UID: "3a928dd6-09f1-42f8-bd56-5e7aec2065ce"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:11:00 crc kubenswrapper[4967]: I1121 16:11:00.344542 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a928dd6-09f1-42f8-bd56-5e7aec2065ce-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:00 crc kubenswrapper[4967]: I1121 16:11:00.344578 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-btnp7\" (UniqueName: \"kubernetes.io/projected/3a928dd6-09f1-42f8-bd56-5e7aec2065ce-kube-api-access-btnp7\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:00 crc kubenswrapper[4967]: I1121 16:11:00.344590 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a928dd6-09f1-42f8-bd56-5e7aec2065ce-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:00 crc kubenswrapper[4967]: I1121 16:11:00.734254 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rzwb9" event={"ID":"3a928dd6-09f1-42f8-bd56-5e7aec2065ce","Type":"ContainerDied","Data":"c5c5643fbc1552738e0aea3cbf4bf24b9845fda2a3546f390322be18ad1c33e1"} Nov 21 16:11:00 crc kubenswrapper[4967]: I1121 16:11:00.734332 4967 scope.go:117] "RemoveContainer" containerID="5952c91a820691a99349b89e830c8d14205881966b9239486acd71c34ddb48c9" Nov 21 16:11:00 crc kubenswrapper[4967]: I1121 16:11:00.734363 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rzwb9" Nov 21 16:11:00 crc kubenswrapper[4967]: I1121 16:11:00.763850 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rzwb9"] Nov 21 16:11:00 crc kubenswrapper[4967]: I1121 16:11:00.767159 4967 scope.go:117] "RemoveContainer" containerID="11fc809f277ed921a6f600a367bfbbcba17aac039870695687369ff5b09f02ef" Nov 21 16:11:00 crc kubenswrapper[4967]: I1121 16:11:00.774304 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-rzwb9"] Nov 21 16:11:00 crc kubenswrapper[4967]: I1121 16:11:00.791365 4967 scope.go:117] "RemoveContainer" containerID="98cd4bb961e41bc70804ca95e02ae3b99a6136e885b1c7f9646ecc81c74109bc" Nov 21 16:11:02 crc kubenswrapper[4967]: I1121 16:11:02.553804 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a928dd6-09f1-42f8-bd56-5e7aec2065ce" path="/var/lib/kubelet/pods/3a928dd6-09f1-42f8-bd56-5e7aec2065ce/volumes" Nov 21 16:11:03 crc kubenswrapper[4967]: I1121 16:11:03.333427 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xbsbm" Nov 21 16:11:03 crc kubenswrapper[4967]: I1121 16:11:03.385631 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xbsbm" Nov 21 16:11:04 crc kubenswrapper[4967]: I1121 16:11:04.367498 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xbsbm"] Nov 21 16:11:04 crc kubenswrapper[4967]: I1121 16:11:04.777243 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xbsbm" podUID="2c316792-8144-44cf-81fb-e679fb21e5c8" containerName="registry-server" containerID="cri-o://a601a81c87b21ca9fe450acd2a279da637bf8eced756f5d125ba6a4aefe6090a" gracePeriod=2 Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.284651 4967 util.go:48] "No ready sandbox for pod can 
be found. Need to start a new one" pod="openshift-marketplace/community-operators-xbsbm" Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.471121 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c316792-8144-44cf-81fb-e679fb21e5c8-catalog-content\") pod \"2c316792-8144-44cf-81fb-e679fb21e5c8\" (UID: \"2c316792-8144-44cf-81fb-e679fb21e5c8\") " Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.471454 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c316792-8144-44cf-81fb-e679fb21e5c8-utilities\") pod \"2c316792-8144-44cf-81fb-e679fb21e5c8\" (UID: \"2c316792-8144-44cf-81fb-e679fb21e5c8\") " Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.471518 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jrvrs\" (UniqueName: \"kubernetes.io/projected/2c316792-8144-44cf-81fb-e679fb21e5c8-kube-api-access-jrvrs\") pod \"2c316792-8144-44cf-81fb-e679fb21e5c8\" (UID: \"2c316792-8144-44cf-81fb-e679fb21e5c8\") " Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.473890 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c316792-8144-44cf-81fb-e679fb21e5c8-utilities" (OuterVolumeSpecName: "utilities") pod "2c316792-8144-44cf-81fb-e679fb21e5c8" (UID: "2c316792-8144-44cf-81fb-e679fb21e5c8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.479282 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c316792-8144-44cf-81fb-e679fb21e5c8-kube-api-access-jrvrs" (OuterVolumeSpecName: "kube-api-access-jrvrs") pod "2c316792-8144-44cf-81fb-e679fb21e5c8" (UID: "2c316792-8144-44cf-81fb-e679fb21e5c8"). InnerVolumeSpecName "kube-api-access-jrvrs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.528776 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c316792-8144-44cf-81fb-e679fb21e5c8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2c316792-8144-44cf-81fb-e679fb21e5c8" (UID: "2c316792-8144-44cf-81fb-e679fb21e5c8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.574814 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c316792-8144-44cf-81fb-e679fb21e5c8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.575292 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c316792-8144-44cf-81fb-e679fb21e5c8-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.575303 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jrvrs\" (UniqueName: \"kubernetes.io/projected/2c316792-8144-44cf-81fb-e679fb21e5c8-kube-api-access-jrvrs\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.790066 4967 generic.go:334] "Generic (PLEG): container finished" podID="2c316792-8144-44cf-81fb-e679fb21e5c8" containerID="a601a81c87b21ca9fe450acd2a279da637bf8eced756f5d125ba6a4aefe6090a" exitCode=0 Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.790117 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbsbm" event={"ID":"2c316792-8144-44cf-81fb-e679fb21e5c8","Type":"ContainerDied","Data":"a601a81c87b21ca9fe450acd2a279da637bf8eced756f5d125ba6a4aefe6090a"} Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.790150 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xbsbm" event={"ID":"2c316792-8144-44cf-81fb-e679fb21e5c8","Type":"ContainerDied","Data":"3fa36ee1615004e34dbf68d8b2f048a87d35f1a66460ec085b5c78eb680e6752"} Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.790142 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xbsbm" Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.790165 4967 scope.go:117] "RemoveContainer" containerID="a601a81c87b21ca9fe450acd2a279da637bf8eced756f5d125ba6a4aefe6090a" Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.813558 4967 scope.go:117] "RemoveContainer" containerID="c399d482170dba274282db2ea3ad684a998a71b68c27db7c09a6312a00708b4d" Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.825945 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xbsbm"] Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.836027 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xbsbm"] Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.852554 4967 scope.go:117] "RemoveContainer" containerID="4e4210a3e77224cd05854328c5e4290074c30ee725917298a643420292221784" Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.893678 4967 scope.go:117] "RemoveContainer" containerID="a601a81c87b21ca9fe450acd2a279da637bf8eced756f5d125ba6a4aefe6090a" Nov 21 16:11:05 crc kubenswrapper[4967]: E1121 16:11:05.894201 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a601a81c87b21ca9fe450acd2a279da637bf8eced756f5d125ba6a4aefe6090a\": container with ID starting with a601a81c87b21ca9fe450acd2a279da637bf8eced756f5d125ba6a4aefe6090a not found: ID does not exist" containerID="a601a81c87b21ca9fe450acd2a279da637bf8eced756f5d125ba6a4aefe6090a" Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.894255 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a601a81c87b21ca9fe450acd2a279da637bf8eced756f5d125ba6a4aefe6090a"} err="failed to get container status \"a601a81c87b21ca9fe450acd2a279da637bf8eced756f5d125ba6a4aefe6090a\": rpc error: code = NotFound desc = could not find container \"a601a81c87b21ca9fe450acd2a279da637bf8eced756f5d125ba6a4aefe6090a\": container with ID starting with a601a81c87b21ca9fe450acd2a279da637bf8eced756f5d125ba6a4aefe6090a not found: ID does not exist" Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.894293 4967 scope.go:117] "RemoveContainer" containerID="c399d482170dba274282db2ea3ad684a998a71b68c27db7c09a6312a00708b4d" Nov 21 16:11:05 crc kubenswrapper[4967]: E1121 16:11:05.894689 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c399d482170dba274282db2ea3ad684a998a71b68c27db7c09a6312a00708b4d\": container with ID starting with c399d482170dba274282db2ea3ad684a998a71b68c27db7c09a6312a00708b4d not found: ID does not exist" containerID="c399d482170dba274282db2ea3ad684a998a71b68c27db7c09a6312a00708b4d" Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.894749 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c399d482170dba274282db2ea3ad684a998a71b68c27db7c09a6312a00708b4d"} err="failed to get container status \"c399d482170dba274282db2ea3ad684a998a71b68c27db7c09a6312a00708b4d\": rpc error: code = NotFound desc = could not find container \"c399d482170dba274282db2ea3ad684a998a71b68c27db7c09a6312a00708b4d\": container with ID starting with c399d482170dba274282db2ea3ad684a998a71b68c27db7c09a6312a00708b4d not found: ID does not exist" Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.894788 4967 scope.go:117] "RemoveContainer" 
containerID="4e4210a3e77224cd05854328c5e4290074c30ee725917298a643420292221784" Nov 21 16:11:05 crc kubenswrapper[4967]: E1121 16:11:05.895358 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4e4210a3e77224cd05854328c5e4290074c30ee725917298a643420292221784\": container with ID starting with 4e4210a3e77224cd05854328c5e4290074c30ee725917298a643420292221784 not found: ID does not exist" containerID="4e4210a3e77224cd05854328c5e4290074c30ee725917298a643420292221784" Nov 21 16:11:05 crc kubenswrapper[4967]: I1121 16:11:05.895387 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4e4210a3e77224cd05854328c5e4290074c30ee725917298a643420292221784"} err="failed to get container status \"4e4210a3e77224cd05854328c5e4290074c30ee725917298a643420292221784\": rpc error: code = NotFound desc = could not find container \"4e4210a3e77224cd05854328c5e4290074c30ee725917298a643420292221784\": container with ID starting with 4e4210a3e77224cd05854328c5e4290074c30ee725917298a643420292221784 not found: ID does not exist" Nov 21 16:11:06 crc kubenswrapper[4967]: I1121 16:11:06.549440 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c316792-8144-44cf-81fb-e679fb21e5c8" path="/var/lib/kubelet/pods/2c316792-8144-44cf-81fb-e679fb21e5c8/volumes" Nov 21 16:11:24 crc kubenswrapper[4967]: I1121 16:11:24.993742 4967 generic.go:334] "Generic (PLEG): container finished" podID="4aaa5027-a173-4854-90e9-69635bd3cd76" containerID="9fc2e9c0f0761ae504dd635f4a36f1fd28509fb526143db39b597d6fa238bc77" exitCode=0 Nov 21 16:11:24 crc kubenswrapper[4967]: I1121 16:11:24.993812 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" event={"ID":"4aaa5027-a173-4854-90e9-69635bd3cd76","Type":"ContainerDied","Data":"9fc2e9c0f0761ae504dd635f4a36f1fd28509fb526143db39b597d6fa238bc77"} Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.454600 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.596514 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"4aaa5027-a173-4854-90e9-69635bd3cd76\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.596573 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-libvirt-combined-ca-bundle\") pod \"4aaa5027-a173-4854-90e9-69635bd3cd76\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.596630 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-inventory\") pod \"4aaa5027-a173-4854-90e9-69635bd3cd76\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.596735 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xjkx2\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-kube-api-access-xjkx2\") pod \"4aaa5027-a173-4854-90e9-69635bd3cd76\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.596822 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-telemetry-power-monitoring-combined-ca-bundle\") pod \"4aaa5027-a173-4854-90e9-69635bd3cd76\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.596922 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-neutron-metadata-combined-ca-bundle\") pod \"4aaa5027-a173-4854-90e9-69635bd3cd76\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.596943 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-bootstrap-combined-ca-bundle\") pod \"4aaa5027-a173-4854-90e9-69635bd3cd76\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.596960 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-repo-setup-combined-ca-bundle\") pod \"4aaa5027-a173-4854-90e9-69635bd3cd76\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.596982 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"4aaa5027-a173-4854-90e9-69635bd3cd76\" (UID: 
\"4aaa5027-a173-4854-90e9-69635bd3cd76\") " Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.597028 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"4aaa5027-a173-4854-90e9-69635bd3cd76\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.597071 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"4aaa5027-a173-4854-90e9-69635bd3cd76\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.597101 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-ovn-default-certs-0\") pod \"4aaa5027-a173-4854-90e9-69635bd3cd76\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.597122 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-ovn-combined-ca-bundle\") pod \"4aaa5027-a173-4854-90e9-69635bd3cd76\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.597144 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-telemetry-combined-ca-bundle\") pod \"4aaa5027-a173-4854-90e9-69635bd3cd76\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.597177 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-nova-combined-ca-bundle\") pod \"4aaa5027-a173-4854-90e9-69635bd3cd76\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.597196 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-ssh-key\") pod \"4aaa5027-a173-4854-90e9-69635bd3cd76\" (UID: \"4aaa5027-a173-4854-90e9-69635bd3cd76\") " Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.603044 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "4aaa5027-a173-4854-90e9-69635bd3cd76" (UID: "4aaa5027-a173-4854-90e9-69635bd3cd76"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.604011 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "4aaa5027-a173-4854-90e9-69635bd3cd76" (UID: "4aaa5027-a173-4854-90e9-69635bd3cd76"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.604019 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "4aaa5027-a173-4854-90e9-69635bd3cd76" (UID: "4aaa5027-a173-4854-90e9-69635bd3cd76"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.604659 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "4aaa5027-a173-4854-90e9-69635bd3cd76" (UID: "4aaa5027-a173-4854-90e9-69635bd3cd76"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.606279 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "4aaa5027-a173-4854-90e9-69635bd3cd76" (UID: "4aaa5027-a173-4854-90e9-69635bd3cd76"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.606841 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "4aaa5027-a173-4854-90e9-69635bd3cd76" (UID: "4aaa5027-a173-4854-90e9-69635bd3cd76"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.606888 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "4aaa5027-a173-4854-90e9-69635bd3cd76" (UID: "4aaa5027-a173-4854-90e9-69635bd3cd76"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.607889 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "4aaa5027-a173-4854-90e9-69635bd3cd76" (UID: "4aaa5027-a173-4854-90e9-69635bd3cd76"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.608460 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "4aaa5027-a173-4854-90e9-69635bd3cd76" (UID: "4aaa5027-a173-4854-90e9-69635bd3cd76"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.608536 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "4aaa5027-a173-4854-90e9-69635bd3cd76" (UID: "4aaa5027-a173-4854-90e9-69635bd3cd76"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.609254 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-kube-api-access-xjkx2" (OuterVolumeSpecName: "kube-api-access-xjkx2") pod "4aaa5027-a173-4854-90e9-69635bd3cd76" (UID: "4aaa5027-a173-4854-90e9-69635bd3cd76"). InnerVolumeSpecName "kube-api-access-xjkx2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.609837 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0") pod "4aaa5027-a173-4854-90e9-69635bd3cd76" (UID: "4aaa5027-a173-4854-90e9-69635bd3cd76"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.610592 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "4aaa5027-a173-4854-90e9-69635bd3cd76" (UID: "4aaa5027-a173-4854-90e9-69635bd3cd76"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.611419 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "4aaa5027-a173-4854-90e9-69635bd3cd76" (UID: "4aaa5027-a173-4854-90e9-69635bd3cd76"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.636806 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4aaa5027-a173-4854-90e9-69635bd3cd76" (UID: "4aaa5027-a173-4854-90e9-69635bd3cd76"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.637449 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-inventory" (OuterVolumeSpecName: "inventory") pod "4aaa5027-a173-4854-90e9-69635bd3cd76" (UID: "4aaa5027-a173-4854-90e9-69635bd3cd76"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.710013 4967 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.710053 4967 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.710071 4967 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.710082 4967 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.710092 4967 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.710101 4967 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.710113 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.710122 4967 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.710132 4967 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.710141 4967 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.710154 4967 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xjkx2\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-kube-api-access-xjkx2\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.710163 4967 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.710172 4967 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.710184 4967 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.710195 4967 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4aaa5027-a173-4854-90e9-69635bd3cd76-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:26 crc kubenswrapper[4967]: I1121 16:11:26.710206 4967 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/4aaa5027-a173-4854-90e9-69635bd3cd76-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.015586 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" event={"ID":"4aaa5027-a173-4854-90e9-69635bd3cd76","Type":"ContainerDied","Data":"b63c26233ed87944f62e42a4a63ce7fa5c3aab839831869396437f0e32d57d90"} Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.015627 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b63c26233ed87944f62e42a4a63ce7fa5c3aab839831869396437f0e32d57d90" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.016039 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.160098 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg"] Nov 21 16:11:27 crc kubenswrapper[4967]: E1121 16:11:27.160875 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c316792-8144-44cf-81fb-e679fb21e5c8" containerName="extract-content" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.160894 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c316792-8144-44cf-81fb-e679fb21e5c8" containerName="extract-content" Nov 21 16:11:27 crc kubenswrapper[4967]: E1121 16:11:27.160921 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a928dd6-09f1-42f8-bd56-5e7aec2065ce" containerName="extract-utilities" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.160929 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a928dd6-09f1-42f8-bd56-5e7aec2065ce" containerName="extract-utilities" Nov 21 16:11:27 crc kubenswrapper[4967]: E1121 16:11:27.160950 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a928dd6-09f1-42f8-bd56-5e7aec2065ce" containerName="registry-server" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.160956 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a928dd6-09f1-42f8-bd56-5e7aec2065ce" containerName="registry-server" Nov 21 16:11:27 crc kubenswrapper[4967]: E1121 16:11:27.160965 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bbab54e-b429-427f-b8df-19a6d056dbb5" containerName="extract-content" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.160971 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bbab54e-b429-427f-b8df-19a6d056dbb5" containerName="extract-content" Nov 21 16:11:27 crc kubenswrapper[4967]: E1121 16:11:27.160998 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a928dd6-09f1-42f8-bd56-5e7aec2065ce" containerName="extract-content" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.161004 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a928dd6-09f1-42f8-bd56-5e7aec2065ce" containerName="extract-content" Nov 21 16:11:27 crc kubenswrapper[4967]: E1121 16:11:27.161017 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c316792-8144-44cf-81fb-e679fb21e5c8" containerName="registry-server" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.161022 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c316792-8144-44cf-81fb-e679fb21e5c8" containerName="registry-server" Nov 21 16:11:27 crc kubenswrapper[4967]: E1121 16:11:27.161034 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bbab54e-b429-427f-b8df-19a6d056dbb5" containerName="extract-utilities" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.161039 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bbab54e-b429-427f-b8df-19a6d056dbb5" containerName="extract-utilities" Nov 21 16:11:27 crc kubenswrapper[4967]: E1121 16:11:27.161065 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c316792-8144-44cf-81fb-e679fb21e5c8" containerName="extract-utilities" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.161071 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c316792-8144-44cf-81fb-e679fb21e5c8" containerName="extract-utilities" Nov 21 16:11:27 crc kubenswrapper[4967]: E1121 16:11:27.161079 4967 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="5bbab54e-b429-427f-b8df-19a6d056dbb5" containerName="registry-server" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.161085 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bbab54e-b429-427f-b8df-19a6d056dbb5" containerName="registry-server" Nov 21 16:11:27 crc kubenswrapper[4967]: E1121 16:11:27.161098 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4aaa5027-a173-4854-90e9-69635bd3cd76" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.161113 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="4aaa5027-a173-4854-90e9-69635bd3cd76" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.161341 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bbab54e-b429-427f-b8df-19a6d056dbb5" containerName="registry-server" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.161360 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a928dd6-09f1-42f8-bd56-5e7aec2065ce" containerName="registry-server" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.161376 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="4aaa5027-a173-4854-90e9-69635bd3cd76" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.161389 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c316792-8144-44cf-81fb-e679fb21e5c8" containerName="registry-server" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.162166 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.166923 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.167229 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.167256 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rn5c5" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.167408 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.171750 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg"] Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.174720 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.326250 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19f7acab-a57b-4fcb-b66b-e988058d14ae-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-w5klg\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.326682 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: 
\"kubernetes.io/configmap/19f7acab-a57b-4fcb-b66b-e988058d14ae-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-w5klg\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.326892 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19f7acab-a57b-4fcb-b66b-e988058d14ae-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-w5klg\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.327083 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8zvxb\" (UniqueName: \"kubernetes.io/projected/19f7acab-a57b-4fcb-b66b-e988058d14ae-kube-api-access-8zvxb\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-w5klg\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.327126 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f7acab-a57b-4fcb-b66b-e988058d14ae-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-w5klg\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.429664 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/19f7acab-a57b-4fcb-b66b-e988058d14ae-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-w5klg\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.429772 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19f7acab-a57b-4fcb-b66b-e988058d14ae-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-w5klg\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.429872 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8zvxb\" (UniqueName: \"kubernetes.io/projected/19f7acab-a57b-4fcb-b66b-e988058d14ae-kube-api-access-8zvxb\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-w5klg\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.429901 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f7acab-a57b-4fcb-b66b-e988058d14ae-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-w5klg\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.430042 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19f7acab-a57b-4fcb-b66b-e988058d14ae-inventory\") 
pod \"ovn-edpm-deployment-openstack-edpm-ipam-w5klg\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.430842 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/19f7acab-a57b-4fcb-b66b-e988058d14ae-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-w5klg\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.433731 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19f7acab-a57b-4fcb-b66b-e988058d14ae-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-w5klg\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.434512 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f7acab-a57b-4fcb-b66b-e988058d14ae-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-w5klg\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.435751 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19f7acab-a57b-4fcb-b66b-e988058d14ae-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-w5klg\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.446529 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8zvxb\" (UniqueName: \"kubernetes.io/projected/19f7acab-a57b-4fcb-b66b-e988058d14ae-kube-api-access-8zvxb\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-w5klg\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" Nov 21 16:11:27 crc kubenswrapper[4967]: I1121 16:11:27.478634 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" Nov 21 16:11:28 crc kubenswrapper[4967]: I1121 16:11:28.023167 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg"] Nov 21 16:11:28 crc kubenswrapper[4967]: W1121 16:11:28.034497 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod19f7acab_a57b_4fcb_b66b_e988058d14ae.slice/crio-9f9cecf6dd9f16391f3ba0da4029a0b461afeacd5137b12a735cfbea359c6012 WatchSource:0}: Error finding container 9f9cecf6dd9f16391f3ba0da4029a0b461afeacd5137b12a735cfbea359c6012: Status 404 returned error can't find the container with id 9f9cecf6dd9f16391f3ba0da4029a0b461afeacd5137b12a735cfbea359c6012 Nov 21 16:11:29 crc kubenswrapper[4967]: I1121 16:11:29.051297 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" event={"ID":"19f7acab-a57b-4fcb-b66b-e988058d14ae","Type":"ContainerStarted","Data":"41034e6ca7d57fe02fe16ae0b09721de720c229b2a60318442d9e71bdf41b85b"} Nov 21 16:11:29 crc kubenswrapper[4967]: I1121 16:11:29.051854 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" event={"ID":"19f7acab-a57b-4fcb-b66b-e988058d14ae","Type":"ContainerStarted","Data":"9f9cecf6dd9f16391f3ba0da4029a0b461afeacd5137b12a735cfbea359c6012"} Nov 21 16:11:29 crc kubenswrapper[4967]: I1121 16:11:29.074006 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" podStartSLOduration=1.54439877 podStartE2EDuration="2.073987102s" podCreationTimestamp="2025-11-21 16:11:27 +0000 UTC" firstStartedPulling="2025-11-21 16:11:28.048563591 +0000 UTC m=+2176.307084599" lastFinishedPulling="2025-11-21 16:11:28.578151923 +0000 UTC m=+2176.836672931" observedRunningTime="2025-11-21 16:11:29.066462805 +0000 UTC m=+2177.324983813" watchObservedRunningTime="2025-11-21 16:11:29.073987102 +0000 UTC m=+2177.332508100" Nov 21 16:12:26 crc kubenswrapper[4967]: I1121 16:12:26.627372 4967 generic.go:334] "Generic (PLEG): container finished" podID="19f7acab-a57b-4fcb-b66b-e988058d14ae" containerID="41034e6ca7d57fe02fe16ae0b09721de720c229b2a60318442d9e71bdf41b85b" exitCode=0 Nov 21 16:12:26 crc kubenswrapper[4967]: I1121 16:12:26.627474 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" event={"ID":"19f7acab-a57b-4fcb-b66b-e988058d14ae","Type":"ContainerDied","Data":"41034e6ca7d57fe02fe16ae0b09721de720c229b2a60318442d9e71bdf41b85b"} Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.106996 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.164605 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19f7acab-a57b-4fcb-b66b-e988058d14ae-ssh-key\") pod \"19f7acab-a57b-4fcb-b66b-e988058d14ae\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.164884 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/19f7acab-a57b-4fcb-b66b-e988058d14ae-ovncontroller-config-0\") pod \"19f7acab-a57b-4fcb-b66b-e988058d14ae\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.164909 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8zvxb\" (UniqueName: \"kubernetes.io/projected/19f7acab-a57b-4fcb-b66b-e988058d14ae-kube-api-access-8zvxb\") pod \"19f7acab-a57b-4fcb-b66b-e988058d14ae\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.164982 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f7acab-a57b-4fcb-b66b-e988058d14ae-ovn-combined-ca-bundle\") pod \"19f7acab-a57b-4fcb-b66b-e988058d14ae\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.165009 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19f7acab-a57b-4fcb-b66b-e988058d14ae-inventory\") pod \"19f7acab-a57b-4fcb-b66b-e988058d14ae\" (UID: \"19f7acab-a57b-4fcb-b66b-e988058d14ae\") " Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.180710 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19f7acab-a57b-4fcb-b66b-e988058d14ae-kube-api-access-8zvxb" (OuterVolumeSpecName: "kube-api-access-8zvxb") pod "19f7acab-a57b-4fcb-b66b-e988058d14ae" (UID: "19f7acab-a57b-4fcb-b66b-e988058d14ae"). InnerVolumeSpecName "kube-api-access-8zvxb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.181546 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19f7acab-a57b-4fcb-b66b-e988058d14ae-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "19f7acab-a57b-4fcb-b66b-e988058d14ae" (UID: "19f7acab-a57b-4fcb-b66b-e988058d14ae"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.197876 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19f7acab-a57b-4fcb-b66b-e988058d14ae-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "19f7acab-a57b-4fcb-b66b-e988058d14ae" (UID: "19f7acab-a57b-4fcb-b66b-e988058d14ae"). InnerVolumeSpecName "ovncontroller-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.204117 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19f7acab-a57b-4fcb-b66b-e988058d14ae-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "19f7acab-a57b-4fcb-b66b-e988058d14ae" (UID: "19f7acab-a57b-4fcb-b66b-e988058d14ae"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.211170 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19f7acab-a57b-4fcb-b66b-e988058d14ae-inventory" (OuterVolumeSpecName: "inventory") pod "19f7acab-a57b-4fcb-b66b-e988058d14ae" (UID: "19f7acab-a57b-4fcb-b66b-e988058d14ae"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.269079 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19f7acab-a57b-4fcb-b66b-e988058d14ae-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.269160 4967 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/19f7acab-a57b-4fcb-b66b-e988058d14ae-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.269180 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8zvxb\" (UniqueName: \"kubernetes.io/projected/19f7acab-a57b-4fcb-b66b-e988058d14ae-kube-api-access-8zvxb\") on node \"crc\" DevicePath \"\"" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.269193 4967 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f7acab-a57b-4fcb-b66b-e988058d14ae-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.269206 4967 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19f7acab-a57b-4fcb-b66b-e988058d14ae-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.649331 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" event={"ID":"19f7acab-a57b-4fcb-b66b-e988058d14ae","Type":"ContainerDied","Data":"9f9cecf6dd9f16391f3ba0da4029a0b461afeacd5137b12a735cfbea359c6012"} Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.649371 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f9cecf6dd9f16391f3ba0da4029a0b461afeacd5137b12a735cfbea359c6012" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.649392 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-w5klg" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.732176 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv"] Nov 21 16:12:28 crc kubenswrapper[4967]: E1121 16:12:28.732668 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19f7acab-a57b-4fcb-b66b-e988058d14ae" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.732685 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="19f7acab-a57b-4fcb-b66b-e988058d14ae" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.732901 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="19f7acab-a57b-4fcb-b66b-e988058d14ae" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.733778 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.735946 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.736148 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.736486 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.736612 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.736745 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.745109 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rn5c5" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.748171 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv"] Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.781290 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.781360 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.781405 4967 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wvxz\" (UniqueName: \"kubernetes.io/projected/492f2f9f-3f85-4fdd-a247-15d403c3bb87-kube-api-access-2wvxz\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.781541 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.782076 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.782423 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.885413 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.885569 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.885614 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.885647 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-inventory\") pod 
\"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.885736 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wvxz\" (UniqueName: \"kubernetes.io/projected/492f2f9f-3f85-4fdd-a247-15d403c3bb87-kube-api-access-2wvxz\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.885794 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.889744 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.889659 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.890612 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.890961 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.891038 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:28 crc kubenswrapper[4967]: I1121 16:12:28.903624 4967 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-2wvxz\" (UniqueName: \"kubernetes.io/projected/492f2f9f-3f85-4fdd-a247-15d403c3bb87-kube-api-access-2wvxz\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:29 crc kubenswrapper[4967]: I1121 16:12:29.061464 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:12:29 crc kubenswrapper[4967]: I1121 16:12:29.578227 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv"] Nov 21 16:12:29 crc kubenswrapper[4967]: I1121 16:12:29.583487 4967 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 16:12:29 crc kubenswrapper[4967]: I1121 16:12:29.660751 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" event={"ID":"492f2f9f-3f85-4fdd-a247-15d403c3bb87","Type":"ContainerStarted","Data":"bcd61e48128a340a68958fab562fcceaf86a5f4c24d93dc378d3b30de11ad643"} Nov 21 16:12:30 crc kubenswrapper[4967]: I1121 16:12:30.672279 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" event={"ID":"492f2f9f-3f85-4fdd-a247-15d403c3bb87","Type":"ContainerStarted","Data":"92316bf31b7643dbd32980e1caf1ed8c3e13f94a14bb85390280e85a480ba2d8"} Nov 21 16:12:30 crc kubenswrapper[4967]: I1121 16:12:30.694224 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" podStartSLOduration=2.251226534 podStartE2EDuration="2.694207198s" podCreationTimestamp="2025-11-21 16:12:28 +0000 UTC" firstStartedPulling="2025-11-21 16:12:29.58318493 +0000 UTC m=+2237.841705938" lastFinishedPulling="2025-11-21 16:12:30.026165594 +0000 UTC m=+2238.284686602" observedRunningTime="2025-11-21 16:12:30.693376034 +0000 UTC m=+2238.951897042" watchObservedRunningTime="2025-11-21 16:12:30.694207198 +0000 UTC m=+2238.952728226" Nov 21 16:13:15 crc kubenswrapper[4967]: I1121 16:13:15.148636 4967 generic.go:334] "Generic (PLEG): container finished" podID="492f2f9f-3f85-4fdd-a247-15d403c3bb87" containerID="92316bf31b7643dbd32980e1caf1ed8c3e13f94a14bb85390280e85a480ba2d8" exitCode=0 Nov 21 16:13:15 crc kubenswrapper[4967]: I1121 16:13:15.148724 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" event={"ID":"492f2f9f-3f85-4fdd-a247-15d403c3bb87","Type":"ContainerDied","Data":"92316bf31b7643dbd32980e1caf1ed8c3e13f94a14bb85390280e85a480ba2d8"} Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.522037 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.522385 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial 
tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.642061 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.760439 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-inventory\") pod \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.760497 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2wvxz\" (UniqueName: \"kubernetes.io/projected/492f2f9f-3f85-4fdd-a247-15d403c3bb87-kube-api-access-2wvxz\") pod \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.760632 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-neutron-metadata-combined-ca-bundle\") pod \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.760709 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-nova-metadata-neutron-config-0\") pod \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.760795 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-ssh-key\") pod \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.760950 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-neutron-ovn-metadata-agent-neutron-config-0\") pod \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\" (UID: \"492f2f9f-3f85-4fdd-a247-15d403c3bb87\") " Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.766451 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "492f2f9f-3f85-4fdd-a247-15d403c3bb87" (UID: "492f2f9f-3f85-4fdd-a247-15d403c3bb87"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.770593 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/492f2f9f-3f85-4fdd-a247-15d403c3bb87-kube-api-access-2wvxz" (OuterVolumeSpecName: "kube-api-access-2wvxz") pod "492f2f9f-3f85-4fdd-a247-15d403c3bb87" (UID: "492f2f9f-3f85-4fdd-a247-15d403c3bb87"). InnerVolumeSpecName "kube-api-access-2wvxz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.797339 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "492f2f9f-3f85-4fdd-a247-15d403c3bb87" (UID: "492f2f9f-3f85-4fdd-a247-15d403c3bb87"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.798135 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "492f2f9f-3f85-4fdd-a247-15d403c3bb87" (UID: "492f2f9f-3f85-4fdd-a247-15d403c3bb87"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.800971 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "492f2f9f-3f85-4fdd-a247-15d403c3bb87" (UID: "492f2f9f-3f85-4fdd-a247-15d403c3bb87"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.802572 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-inventory" (OuterVolumeSpecName: "inventory") pod "492f2f9f-3f85-4fdd-a247-15d403c3bb87" (UID: "492f2f9f-3f85-4fdd-a247-15d403c3bb87"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.868458 4967 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.868498 4967 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.868508 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2wvxz\" (UniqueName: \"kubernetes.io/projected/492f2f9f-3f85-4fdd-a247-15d403c3bb87-kube-api-access-2wvxz\") on node \"crc\" DevicePath \"\"" Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.868518 4967 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.868528 4967 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 21 16:13:16 crc kubenswrapper[4967]: I1121 16:13:16.868536 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/492f2f9f-3f85-4fdd-a247-15d403c3bb87-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.178213 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" event={"ID":"492f2f9f-3f85-4fdd-a247-15d403c3bb87","Type":"ContainerDied","Data":"bcd61e48128a340a68958fab562fcceaf86a5f4c24d93dc378d3b30de11ad643"} Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.178258 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bcd61e48128a340a68958fab562fcceaf86a5f4c24d93dc378d3b30de11ad643" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.178368 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.254360 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq"] Nov 21 16:13:17 crc kubenswrapper[4967]: E1121 16:13:17.254943 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="492f2f9f-3f85-4fdd-a247-15d403c3bb87" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.254971 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="492f2f9f-3f85-4fdd-a247-15d403c3bb87" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.255207 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="492f2f9f-3f85-4fdd-a247-15d403c3bb87" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.256011 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.258573 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rn5c5" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.258709 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.259081 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.259203 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.259292 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.284005 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq"] Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.378929 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.379078 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpfjn\" (UniqueName: \"kubernetes.io/projected/1f68112d-a2c4-44ac-92bb-c24db6e767c0-kube-api-access-vpfjn\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.379154 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.379186 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.379205 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.481987 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.482083 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpfjn\" (UniqueName: \"kubernetes.io/projected/1f68112d-a2c4-44ac-92bb-c24db6e767c0-kube-api-access-vpfjn\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.482157 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.482194 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.482221 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.486826 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.486900 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.487231 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.487453 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-ssh-key\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.500776 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpfjn\" (UniqueName: \"kubernetes.io/projected/1f68112d-a2c4-44ac-92bb-c24db6e767c0-kube-api-access-vpfjn\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" Nov 21 16:13:17 crc kubenswrapper[4967]: I1121 16:13:17.579120 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" Nov 21 16:13:18 crc kubenswrapper[4967]: I1121 16:13:18.153871 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq"] Nov 21 16:13:18 crc kubenswrapper[4967]: I1121 16:13:18.189406 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" event={"ID":"1f68112d-a2c4-44ac-92bb-c24db6e767c0","Type":"ContainerStarted","Data":"1e1102b373c3f21700a4d0f9f2370ac1be25b2b3f6e8da0db363b7955673637d"} Nov 21 16:13:19 crc kubenswrapper[4967]: I1121 16:13:19.201736 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" event={"ID":"1f68112d-a2c4-44ac-92bb-c24db6e767c0","Type":"ContainerStarted","Data":"5505ecbf487eef97da86ee123e96640f7ef3878dbd5f1cac111da97774dbf798"} Nov 21 16:13:19 crc kubenswrapper[4967]: I1121 16:13:19.222548 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" podStartSLOduration=1.724968161 podStartE2EDuration="2.222515779s" podCreationTimestamp="2025-11-21 16:13:17 +0000 UTC" firstStartedPulling="2025-11-21 16:13:18.156837278 +0000 UTC m=+2286.415358286" lastFinishedPulling="2025-11-21 16:13:18.654384896 +0000 UTC m=+2286.912905904" observedRunningTime="2025-11-21 16:13:19.215974391 +0000 UTC m=+2287.474495399" watchObservedRunningTime="2025-11-21 16:13:19.222515779 +0000 UTC m=+2287.481036787" Nov 21 16:13:46 crc kubenswrapper[4967]: I1121 16:13:46.523065 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:13:46 crc kubenswrapper[4967]: I1121 16:13:46.523567 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:14:16 crc kubenswrapper[4967]: I1121 16:14:16.521743 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:14:16 crc kubenswrapper[4967]: I1121 16:14:16.523492 4967 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:14:16 crc kubenswrapper[4967]: I1121 16:14:16.523571 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 16:14:16 crc kubenswrapper[4967]: I1121 16:14:16.524524 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 16:14:16 crc kubenswrapper[4967]: I1121 16:14:16.524597 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" gracePeriod=600 Nov 21 16:14:16 crc kubenswrapper[4967]: I1121 16:14:16.770364 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" exitCode=0 Nov 21 16:14:16 crc kubenswrapper[4967]: I1121 16:14:16.770444 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8"} Nov 21 16:14:16 crc kubenswrapper[4967]: I1121 16:14:16.770527 4967 scope.go:117] "RemoveContainer" containerID="3a70b103f587ca806a5d8c593187d79e87e580cfd34b5f2ccc6278b03129472e" Nov 21 16:14:17 crc kubenswrapper[4967]: E1121 16:14:17.163753 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:14:17 crc kubenswrapper[4967]: I1121 16:14:17.791019 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:14:17 crc kubenswrapper[4967]: E1121 16:14:17.791565 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:14:30 crc kubenswrapper[4967]: I1121 16:14:30.536464 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:14:30 crc kubenswrapper[4967]: E1121 16:14:30.538085 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:14:41 crc kubenswrapper[4967]: I1121 16:14:41.536020 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:14:41 crc kubenswrapper[4967]: E1121 16:14:41.536899 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:14:56 crc kubenswrapper[4967]: I1121 16:14:56.536683 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:14:56 crc kubenswrapper[4967]: E1121 16:14:56.537429 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:15:00 crc kubenswrapper[4967]: I1121 16:15:00.154006 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh"] Nov 21 16:15:00 crc kubenswrapper[4967]: I1121 16:15:00.156839 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh" Nov 21 16:15:00 crc kubenswrapper[4967]: I1121 16:15:00.159888 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 16:15:00 crc kubenswrapper[4967]: I1121 16:15:00.161479 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 16:15:00 crc kubenswrapper[4967]: I1121 16:15:00.171293 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh"] Nov 21 16:15:00 crc kubenswrapper[4967]: I1121 16:15:00.267268 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6bxq\" (UniqueName: \"kubernetes.io/projected/86aecfaf-c4d7-408b-85f2-fa9a09152d7d-kube-api-access-w6bxq\") pod \"collect-profiles-29395695-476vh\" (UID: \"86aecfaf-c4d7-408b-85f2-fa9a09152d7d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh" Nov 21 16:15:00 crc kubenswrapper[4967]: I1121 16:15:00.267488 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/86aecfaf-c4d7-408b-85f2-fa9a09152d7d-secret-volume\") pod \"collect-profiles-29395695-476vh\" (UID: \"86aecfaf-c4d7-408b-85f2-fa9a09152d7d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh" Nov 21 16:15:00 crc kubenswrapper[4967]: I1121 16:15:00.267538 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/86aecfaf-c4d7-408b-85f2-fa9a09152d7d-config-volume\") pod \"collect-profiles-29395695-476vh\" (UID: \"86aecfaf-c4d7-408b-85f2-fa9a09152d7d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh" Nov 21 16:15:00 crc kubenswrapper[4967]: I1121 16:15:00.369662 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6bxq\" (UniqueName: \"kubernetes.io/projected/86aecfaf-c4d7-408b-85f2-fa9a09152d7d-kube-api-access-w6bxq\") pod \"collect-profiles-29395695-476vh\" (UID: \"86aecfaf-c4d7-408b-85f2-fa9a09152d7d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh" Nov 21 16:15:00 crc kubenswrapper[4967]: I1121 16:15:00.369870 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/86aecfaf-c4d7-408b-85f2-fa9a09152d7d-secret-volume\") pod \"collect-profiles-29395695-476vh\" (UID: \"86aecfaf-c4d7-408b-85f2-fa9a09152d7d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh" Nov 21 16:15:00 crc kubenswrapper[4967]: I1121 16:15:00.369924 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/86aecfaf-c4d7-408b-85f2-fa9a09152d7d-config-volume\") pod \"collect-profiles-29395695-476vh\" (UID: \"86aecfaf-c4d7-408b-85f2-fa9a09152d7d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh" Nov 21 16:15:00 crc kubenswrapper[4967]: I1121 16:15:00.370902 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/86aecfaf-c4d7-408b-85f2-fa9a09152d7d-config-volume\") pod 
\"collect-profiles-29395695-476vh\" (UID: \"86aecfaf-c4d7-408b-85f2-fa9a09152d7d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh" Nov 21 16:15:00 crc kubenswrapper[4967]: I1121 16:15:00.381619 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/86aecfaf-c4d7-408b-85f2-fa9a09152d7d-secret-volume\") pod \"collect-profiles-29395695-476vh\" (UID: \"86aecfaf-c4d7-408b-85f2-fa9a09152d7d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh" Nov 21 16:15:00 crc kubenswrapper[4967]: I1121 16:15:00.417294 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6bxq\" (UniqueName: \"kubernetes.io/projected/86aecfaf-c4d7-408b-85f2-fa9a09152d7d-kube-api-access-w6bxq\") pod \"collect-profiles-29395695-476vh\" (UID: \"86aecfaf-c4d7-408b-85f2-fa9a09152d7d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh" Nov 21 16:15:00 crc kubenswrapper[4967]: I1121 16:15:00.491077 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh" Nov 21 16:15:00 crc kubenswrapper[4967]: I1121 16:15:00.954464 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh"] Nov 21 16:15:00 crc kubenswrapper[4967]: W1121 16:15:00.957230 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod86aecfaf_c4d7_408b_85f2_fa9a09152d7d.slice/crio-135c69cb4dfbf36ccac03a21a25c80fddf696c4606fc06f0782fc02e9b17c201 WatchSource:0}: Error finding container 135c69cb4dfbf36ccac03a21a25c80fddf696c4606fc06f0782fc02e9b17c201: Status 404 returned error can't find the container with id 135c69cb4dfbf36ccac03a21a25c80fddf696c4606fc06f0782fc02e9b17c201 Nov 21 16:15:01 crc kubenswrapper[4967]: I1121 16:15:01.286350 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh" event={"ID":"86aecfaf-c4d7-408b-85f2-fa9a09152d7d","Type":"ContainerStarted","Data":"7af3ba61af5de3b05d3c35f71d319784aa585c1b40396d014ca3569c6ff09949"} Nov 21 16:15:01 crc kubenswrapper[4967]: I1121 16:15:01.286730 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh" event={"ID":"86aecfaf-c4d7-408b-85f2-fa9a09152d7d","Type":"ContainerStarted","Data":"135c69cb4dfbf36ccac03a21a25c80fddf696c4606fc06f0782fc02e9b17c201"} Nov 21 16:15:01 crc kubenswrapper[4967]: I1121 16:15:01.312227 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh" podStartSLOduration=1.312209781 podStartE2EDuration="1.312209781s" podCreationTimestamp="2025-11-21 16:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 16:15:01.303619403 +0000 UTC m=+2389.562140411" watchObservedRunningTime="2025-11-21 16:15:01.312209781 +0000 UTC m=+2389.570730789" Nov 21 16:15:02 crc kubenswrapper[4967]: I1121 16:15:02.298514 4967 generic.go:334] "Generic (PLEG): container finished" podID="86aecfaf-c4d7-408b-85f2-fa9a09152d7d" containerID="7af3ba61af5de3b05d3c35f71d319784aa585c1b40396d014ca3569c6ff09949" exitCode=0 Nov 21 16:15:02 crc kubenswrapper[4967]: I1121 16:15:02.298567 
4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh" event={"ID":"86aecfaf-c4d7-408b-85f2-fa9a09152d7d","Type":"ContainerDied","Data":"7af3ba61af5de3b05d3c35f71d319784aa585c1b40396d014ca3569c6ff09949"} Nov 21 16:15:03 crc kubenswrapper[4967]: I1121 16:15:03.700378 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh" Nov 21 16:15:03 crc kubenswrapper[4967]: I1121 16:15:03.756702 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/86aecfaf-c4d7-408b-85f2-fa9a09152d7d-config-volume\") pod \"86aecfaf-c4d7-408b-85f2-fa9a09152d7d\" (UID: \"86aecfaf-c4d7-408b-85f2-fa9a09152d7d\") " Nov 21 16:15:03 crc kubenswrapper[4967]: I1121 16:15:03.756762 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/86aecfaf-c4d7-408b-85f2-fa9a09152d7d-secret-volume\") pod \"86aecfaf-c4d7-408b-85f2-fa9a09152d7d\" (UID: \"86aecfaf-c4d7-408b-85f2-fa9a09152d7d\") " Nov 21 16:15:03 crc kubenswrapper[4967]: I1121 16:15:03.756793 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6bxq\" (UniqueName: \"kubernetes.io/projected/86aecfaf-c4d7-408b-85f2-fa9a09152d7d-kube-api-access-w6bxq\") pod \"86aecfaf-c4d7-408b-85f2-fa9a09152d7d\" (UID: \"86aecfaf-c4d7-408b-85f2-fa9a09152d7d\") " Nov 21 16:15:03 crc kubenswrapper[4967]: I1121 16:15:03.758022 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86aecfaf-c4d7-408b-85f2-fa9a09152d7d-config-volume" (OuterVolumeSpecName: "config-volume") pod "86aecfaf-c4d7-408b-85f2-fa9a09152d7d" (UID: "86aecfaf-c4d7-408b-85f2-fa9a09152d7d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:15:03 crc kubenswrapper[4967]: I1121 16:15:03.763467 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86aecfaf-c4d7-408b-85f2-fa9a09152d7d-kube-api-access-w6bxq" (OuterVolumeSpecName: "kube-api-access-w6bxq") pod "86aecfaf-c4d7-408b-85f2-fa9a09152d7d" (UID: "86aecfaf-c4d7-408b-85f2-fa9a09152d7d"). InnerVolumeSpecName "kube-api-access-w6bxq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:15:03 crc kubenswrapper[4967]: I1121 16:15:03.764037 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/86aecfaf-c4d7-408b-85f2-fa9a09152d7d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "86aecfaf-c4d7-408b-85f2-fa9a09152d7d" (UID: "86aecfaf-c4d7-408b-85f2-fa9a09152d7d"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:15:03 crc kubenswrapper[4967]: I1121 16:15:03.860456 4967 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/86aecfaf-c4d7-408b-85f2-fa9a09152d7d-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 16:15:03 crc kubenswrapper[4967]: I1121 16:15:03.860505 4967 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/86aecfaf-c4d7-408b-85f2-fa9a09152d7d-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 16:15:03 crc kubenswrapper[4967]: I1121 16:15:03.860518 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6bxq\" (UniqueName: \"kubernetes.io/projected/86aecfaf-c4d7-408b-85f2-fa9a09152d7d-kube-api-access-w6bxq\") on node \"crc\" DevicePath \"\"" Nov 21 16:15:04 crc kubenswrapper[4967]: I1121 16:15:04.321892 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh" event={"ID":"86aecfaf-c4d7-408b-85f2-fa9a09152d7d","Type":"ContainerDied","Data":"135c69cb4dfbf36ccac03a21a25c80fddf696c4606fc06f0782fc02e9b17c201"} Nov 21 16:15:04 crc kubenswrapper[4967]: I1121 16:15:04.322157 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="135c69cb4dfbf36ccac03a21a25c80fddf696c4606fc06f0782fc02e9b17c201" Nov 21 16:15:04 crc kubenswrapper[4967]: I1121 16:15:04.321975 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh" Nov 21 16:15:04 crc kubenswrapper[4967]: I1121 16:15:04.380261 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl"] Nov 21 16:15:04 crc kubenswrapper[4967]: I1121 16:15:04.390717 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395650-txjsl"] Nov 21 16:15:04 crc kubenswrapper[4967]: I1121 16:15:04.552714 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d72fe727-d902-4315-afb6-8a67d9df8c57" path="/var/lib/kubelet/pods/d72fe727-d902-4315-afb6-8a67d9df8c57/volumes" Nov 21 16:15:11 crc kubenswrapper[4967]: I1121 16:15:11.536621 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:15:11 crc kubenswrapper[4967]: E1121 16:15:11.538250 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:15:23 crc kubenswrapper[4967]: I1121 16:15:23.481112 4967 scope.go:117] "RemoveContainer" containerID="793bdbf868b261239c03a538844ba6cd357dd1407fc1a0eb38f06c10d3fef5a0" Nov 21 16:15:26 crc kubenswrapper[4967]: I1121 16:15:26.538350 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:15:26 crc kubenswrapper[4967]: E1121 16:15:26.538874 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:15:40 crc kubenswrapper[4967]: I1121 16:15:40.537144 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:15:40 crc kubenswrapper[4967]: E1121 16:15:40.537827 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:15:52 crc kubenswrapper[4967]: I1121 16:15:52.542963 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:15:52 crc kubenswrapper[4967]: E1121 16:15:52.543858 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:16:04 crc kubenswrapper[4967]: I1121 16:16:04.536369 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:16:04 crc kubenswrapper[4967]: E1121 16:16:04.537192 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:16:16 crc kubenswrapper[4967]: I1121 16:16:16.536617 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:16:16 crc kubenswrapper[4967]: E1121 16:16:16.537534 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:16:30 crc kubenswrapper[4967]: I1121 16:16:30.536663 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:16:30 crc kubenswrapper[4967]: E1121 16:16:30.537491 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:16:42 crc kubenswrapper[4967]: I1121 16:16:42.545588 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:16:42 crc kubenswrapper[4967]: E1121 16:16:42.546893 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:16:55 crc kubenswrapper[4967]: I1121 16:16:55.537007 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:16:55 crc kubenswrapper[4967]: E1121 16:16:55.539097 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:17:07 crc kubenswrapper[4967]: I1121 16:17:07.828948 4967 generic.go:334] "Generic (PLEG): container finished" podID="1f68112d-a2c4-44ac-92bb-c24db6e767c0" containerID="5505ecbf487eef97da86ee123e96640f7ef3878dbd5f1cac111da97774dbf798" exitCode=0 Nov 21 16:17:07 crc kubenswrapper[4967]: I1121 16:17:07.829041 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" event={"ID":"1f68112d-a2c4-44ac-92bb-c24db6e767c0","Type":"ContainerDied","Data":"5505ecbf487eef97da86ee123e96640f7ef3878dbd5f1cac111da97774dbf798"} Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.283198 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.447861 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vpfjn\" (UniqueName: \"kubernetes.io/projected/1f68112d-a2c4-44ac-92bb-c24db6e767c0-kube-api-access-vpfjn\") pod \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.448586 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-libvirt-combined-ca-bundle\") pod \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.448644 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-ssh-key\") pod \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.448672 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-inventory\") pod \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.448729 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-libvirt-secret-0\") pod \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\" (UID: \"1f68112d-a2c4-44ac-92bb-c24db6e767c0\") " Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.454532 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "1f68112d-a2c4-44ac-92bb-c24db6e767c0" (UID: "1f68112d-a2c4-44ac-92bb-c24db6e767c0"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.455626 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f68112d-a2c4-44ac-92bb-c24db6e767c0-kube-api-access-vpfjn" (OuterVolumeSpecName: "kube-api-access-vpfjn") pod "1f68112d-a2c4-44ac-92bb-c24db6e767c0" (UID: "1f68112d-a2c4-44ac-92bb-c24db6e767c0"). InnerVolumeSpecName "kube-api-access-vpfjn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.483911 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1f68112d-a2c4-44ac-92bb-c24db6e767c0" (UID: "1f68112d-a2c4-44ac-92bb-c24db6e767c0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.488643 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-inventory" (OuterVolumeSpecName: "inventory") pod "1f68112d-a2c4-44ac-92bb-c24db6e767c0" (UID: "1f68112d-a2c4-44ac-92bb-c24db6e767c0"). 
InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.488710 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "1f68112d-a2c4-44ac-92bb-c24db6e767c0" (UID: "1f68112d-a2c4-44ac-92bb-c24db6e767c0"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.551739 4967 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.551816 4967 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.551828 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.551844 4967 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1f68112d-a2c4-44ac-92bb-c24db6e767c0-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.551880 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vpfjn\" (UniqueName: \"kubernetes.io/projected/1f68112d-a2c4-44ac-92bb-c24db6e767c0-kube-api-access-vpfjn\") on node \"crc\" DevicePath \"\"" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.851205 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" event={"ID":"1f68112d-a2c4-44ac-92bb-c24db6e767c0","Type":"ContainerDied","Data":"1e1102b373c3f21700a4d0f9f2370ac1be25b2b3f6e8da0db363b7955673637d"} Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.851243 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1e1102b373c3f21700a4d0f9f2370ac1be25b2b3f6e8da0db363b7955673637d" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.851572 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.954909 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5"] Nov 21 16:17:09 crc kubenswrapper[4967]: E1121 16:17:09.955450 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f68112d-a2c4-44ac-92bb-c24db6e767c0" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.955472 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f68112d-a2c4-44ac-92bb-c24db6e767c0" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 21 16:17:09 crc kubenswrapper[4967]: E1121 16:17:09.955524 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86aecfaf-c4d7-408b-85f2-fa9a09152d7d" containerName="collect-profiles" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.955531 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="86aecfaf-c4d7-408b-85f2-fa9a09152d7d" containerName="collect-profiles" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.955759 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f68112d-a2c4-44ac-92bb-c24db6e767c0" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.955782 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="86aecfaf-c4d7-408b-85f2-fa9a09152d7d" containerName="collect-profiles" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.956761 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.958847 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rn5c5" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.959092 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.959293 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.959560 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.959687 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.959794 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.960480 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 21 16:17:09 crc kubenswrapper[4967]: I1121 16:17:09.975490 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5"] Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.062621 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: 
\"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.062742 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.062861 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.062900 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.062931 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.063008 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.063040 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.063085 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.063121 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ts8z4\" (UniqueName: 
\"kubernetes.io/projected/cebe1330-948b-4004-b244-fa4e3e22f1de-kube-api-access-ts8z4\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.164816 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.164882 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.164977 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.165015 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ts8z4\" (UniqueName: \"kubernetes.io/projected/cebe1330-948b-4004-b244-fa4e3e22f1de-kube-api-access-ts8z4\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.165051 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.165142 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.165245 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.165272 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: 
\"kubernetes.io/configmap/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.165298 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.166282 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.170988 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.171289 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.171184 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.171693 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.172110 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.174906 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-cell1-compute-config-1\") pod 
\"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.175148 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.184104 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ts8z4\" (UniqueName: \"kubernetes.io/projected/cebe1330-948b-4004-b244-fa4e3e22f1de-kube-api-access-ts8z4\") pod \"nova-edpm-deployment-openstack-edpm-ipam-q6jx5\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.284355 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.536929 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:17:10 crc kubenswrapper[4967]: E1121 16:17:10.537562 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.837069 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5"] Nov 21 16:17:10 crc kubenswrapper[4967]: I1121 16:17:10.863407 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" event={"ID":"cebe1330-948b-4004-b244-fa4e3e22f1de","Type":"ContainerStarted","Data":"0dedf522539f4ab8a03857f37e66aadee9e643d5e12ee13d5177b155b146b07f"} Nov 21 16:17:11 crc kubenswrapper[4967]: I1121 16:17:11.875894 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" event={"ID":"cebe1330-948b-4004-b244-fa4e3e22f1de","Type":"ContainerStarted","Data":"1f4e378634e70ee32696d2a0777af869ebe40aa0e03345a336d6bf5a17981f03"} Nov 21 16:17:11 crc kubenswrapper[4967]: I1121 16:17:11.910526 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" podStartSLOduration=2.501458487 podStartE2EDuration="2.910501379s" podCreationTimestamp="2025-11-21 16:17:09 +0000 UTC" firstStartedPulling="2025-11-21 16:17:10.843405437 +0000 UTC m=+2519.101926445" lastFinishedPulling="2025-11-21 16:17:11.252448329 +0000 UTC m=+2519.510969337" observedRunningTime="2025-11-21 16:17:11.905752534 +0000 UTC m=+2520.164273562" watchObservedRunningTime="2025-11-21 16:17:11.910501379 +0000 UTC m=+2520.169022387" Nov 21 16:17:25 crc kubenswrapper[4967]: I1121 16:17:25.536336 4967 scope.go:117] "RemoveContainer" 
containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:17:25 crc kubenswrapper[4967]: E1121 16:17:25.537015 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:17:40 crc kubenswrapper[4967]: I1121 16:17:40.537037 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:17:40 crc kubenswrapper[4967]: E1121 16:17:40.538015 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:17:51 crc kubenswrapper[4967]: I1121 16:17:51.537865 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:17:51 crc kubenswrapper[4967]: E1121 16:17:51.539010 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:18:05 crc kubenswrapper[4967]: I1121 16:18:05.536598 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:18:05 crc kubenswrapper[4967]: E1121 16:18:05.537525 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:18:17 crc kubenswrapper[4967]: I1121 16:18:17.536898 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:18:17 crc kubenswrapper[4967]: E1121 16:18:17.537801 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:18:29 crc kubenswrapper[4967]: I1121 16:18:29.536944 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:18:29 crc kubenswrapper[4967]: E1121 16:18:29.538904 4967 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:18:40 crc kubenswrapper[4967]: I1121 16:18:40.536636 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:18:40 crc kubenswrapper[4967]: E1121 16:18:40.537623 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:18:51 crc kubenswrapper[4967]: I1121 16:18:51.537360 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:18:51 crc kubenswrapper[4967]: E1121 16:18:51.538156 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:19:04 crc kubenswrapper[4967]: I1121 16:19:04.536651 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:19:04 crc kubenswrapper[4967]: E1121 16:19:04.537423 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:19:18 crc kubenswrapper[4967]: I1121 16:19:18.538469 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8" Nov 21 16:19:19 crc kubenswrapper[4967]: I1121 16:19:19.306554 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"46a318f4ee5716fb3c6255601721ef4b2e2a3c23833fc15562caae3d65cac0d5"} Nov 21 16:19:42 crc kubenswrapper[4967]: I1121 16:19:42.562867 4967 generic.go:334] "Generic (PLEG): container finished" podID="cebe1330-948b-4004-b244-fa4e3e22f1de" containerID="1f4e378634e70ee32696d2a0777af869ebe40aa0e03345a336d6bf5a17981f03" exitCode=0 Nov 21 16:19:42 crc kubenswrapper[4967]: I1121 16:19:42.563164 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" event={"ID":"cebe1330-948b-4004-b244-fa4e3e22f1de","Type":"ContainerDied","Data":"1f4e378634e70ee32696d2a0777af869ebe40aa0e03345a336d6bf5a17981f03"} Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 
16:19:44.122550 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.212922 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-migration-ssh-key-0\") pod \"cebe1330-948b-4004-b244-fa4e3e22f1de\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.212988 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ts8z4\" (UniqueName: \"kubernetes.io/projected/cebe1330-948b-4004-b244-fa4e3e22f1de-kube-api-access-ts8z4\") pod \"cebe1330-948b-4004-b244-fa4e3e22f1de\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.213064 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-extra-config-0\") pod \"cebe1330-948b-4004-b244-fa4e3e22f1de\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.213146 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-cell1-compute-config-1\") pod \"cebe1330-948b-4004-b244-fa4e3e22f1de\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.213357 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-migration-ssh-key-1\") pod \"cebe1330-948b-4004-b244-fa4e3e22f1de\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.213384 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-combined-ca-bundle\") pod \"cebe1330-948b-4004-b244-fa4e3e22f1de\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.213485 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-cell1-compute-config-0\") pod \"cebe1330-948b-4004-b244-fa4e3e22f1de\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.213528 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-inventory\") pod \"cebe1330-948b-4004-b244-fa4e3e22f1de\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.213553 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-ssh-key\") pod \"cebe1330-948b-4004-b244-fa4e3e22f1de\" (UID: \"cebe1330-948b-4004-b244-fa4e3e22f1de\") " Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.221483 4967 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "cebe1330-948b-4004-b244-fa4e3e22f1de" (UID: "cebe1330-948b-4004-b244-fa4e3e22f1de"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.240535 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cebe1330-948b-4004-b244-fa4e3e22f1de-kube-api-access-ts8z4" (OuterVolumeSpecName: "kube-api-access-ts8z4") pod "cebe1330-948b-4004-b244-fa4e3e22f1de" (UID: "cebe1330-948b-4004-b244-fa4e3e22f1de"). InnerVolumeSpecName "kube-api-access-ts8z4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.246122 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "cebe1330-948b-4004-b244-fa4e3e22f1de" (UID: "cebe1330-948b-4004-b244-fa4e3e22f1de"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.247443 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "cebe1330-948b-4004-b244-fa4e3e22f1de" (UID: "cebe1330-948b-4004-b244-fa4e3e22f1de"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.249469 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "cebe1330-948b-4004-b244-fa4e3e22f1de" (UID: "cebe1330-948b-4004-b244-fa4e3e22f1de"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.251337 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "cebe1330-948b-4004-b244-fa4e3e22f1de" (UID: "cebe1330-948b-4004-b244-fa4e3e22f1de"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.263592 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "cebe1330-948b-4004-b244-fa4e3e22f1de" (UID: "cebe1330-948b-4004-b244-fa4e3e22f1de"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.265796 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-inventory" (OuterVolumeSpecName: "inventory") pod "cebe1330-948b-4004-b244-fa4e3e22f1de" (UID: "cebe1330-948b-4004-b244-fa4e3e22f1de"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.267522 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "cebe1330-948b-4004-b244-fa4e3e22f1de" (UID: "cebe1330-948b-4004-b244-fa4e3e22f1de"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.316768 4967 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.316807 4967 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.316818 4967 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.316835 4967 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.316844 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.316853 4967 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.316862 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ts8z4\" (UniqueName: \"kubernetes.io/projected/cebe1330-948b-4004-b244-fa4e3e22f1de-kube-api-access-ts8z4\") on node \"crc\" DevicePath \"\"" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.316871 4967 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.316881 4967 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/cebe1330-948b-4004-b244-fa4e3e22f1de-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.587080 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" event={"ID":"cebe1330-948b-4004-b244-fa4e3e22f1de","Type":"ContainerDied","Data":"0dedf522539f4ab8a03857f37e66aadee9e643d5e12ee13d5177b155b146b07f"} Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.587136 4967 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="0dedf522539f4ab8a03857f37e66aadee9e643d5e12ee13d5177b155b146b07f" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.587427 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-q6jx5" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.692972 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69"] Nov 21 16:19:44 crc kubenswrapper[4967]: E1121 16:19:44.693536 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cebe1330-948b-4004-b244-fa4e3e22f1de" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.693559 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="cebe1330-948b-4004-b244-fa4e3e22f1de" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.693850 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="cebe1330-948b-4004-b244-fa4e3e22f1de" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.694847 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.697996 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.698111 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.698591 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.698873 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rn5c5" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.698885 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.704179 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69"] Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.830412 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.830829 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ft5hp\" (UniqueName: \"kubernetes.io/projected/81fb37a5-540d-440d-b0f7-3ba11bad7c42-kube-api-access-ft5hp\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.830878 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.830905 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.830998 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.831032 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.831087 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.932648 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ft5hp\" (UniqueName: \"kubernetes.io/projected/81fb37a5-540d-440d-b0f7-3ba11bad7c42-kube-api-access-ft5hp\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.932704 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.932729 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69" Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 
16:19:44.932804 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69"
Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.932829 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69"
Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.932892 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69"
Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.932952 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69"
Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.936607 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69"
Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.937063 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69"
Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.937770 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69"
Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.938449 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69"
Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.938695 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69"
Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.944552 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69"
Nov 21 16:19:44 crc kubenswrapper[4967]: I1121 16:19:44.957895 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ft5hp\" (UniqueName: \"kubernetes.io/projected/81fb37a5-540d-440d-b0f7-3ba11bad7c42-kube-api-access-ft5hp\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-6ls69\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69"
Nov 21 16:19:45 crc kubenswrapper[4967]: I1121 16:19:45.021433 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69"
Nov 21 16:19:45 crc kubenswrapper[4967]: I1121 16:19:45.565126 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69"]
Nov 21 16:19:45 crc kubenswrapper[4967]: I1121 16:19:45.565490 4967 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 21 16:19:45 crc kubenswrapper[4967]: I1121 16:19:45.598789 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69" event={"ID":"81fb37a5-540d-440d-b0f7-3ba11bad7c42","Type":"ContainerStarted","Data":"f9500739ddc89fbd48aa47e34529589b269e6cf5451d193b88431b9814096dbe"}
Nov 21 16:19:46 crc kubenswrapper[4967]: I1121 16:19:46.610775 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69" event={"ID":"81fb37a5-540d-440d-b0f7-3ba11bad7c42","Type":"ContainerStarted","Data":"70bbbb16d356ac03b51145d0029f71decb75b6291fb8dc9d58b38fdbdf1347ce"}
Nov 21 16:19:46 crc kubenswrapper[4967]: I1121 16:19:46.654099 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69" podStartSLOduration=2.212192772 podStartE2EDuration="2.65407281s" podCreationTimestamp="2025-11-21 16:19:44 +0000 UTC" firstStartedPulling="2025-11-21 16:19:45.565234837 +0000 UTC m=+2673.823755845" lastFinishedPulling="2025-11-21 16:19:46.007114875 +0000 UTC m=+2674.265635883" observedRunningTime="2025-11-21 16:19:46.637005733 +0000 UTC m=+2674.895526731" watchObservedRunningTime="2025-11-21 16:19:46.65407281 +0000 UTC m=+2674.912593828"
Nov 21 16:21:20 crc kubenswrapper[4967]: I1121 16:21:20.902511 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-js9xg"]
Nov 21 16:21:20 crc kubenswrapper[4967]: I1121 16:21:20.908470 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-js9xg"
Nov 21 16:21:20 crc kubenswrapper[4967]: I1121 16:21:20.944195 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-js9xg"]
Nov 21 16:21:21 crc kubenswrapper[4967]: I1121 16:21:21.021846 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkkrj\" (UniqueName: \"kubernetes.io/projected/eff34758-2a27-4567-a0bc-860296edea61-kube-api-access-mkkrj\") pod \"certified-operators-js9xg\" (UID: \"eff34758-2a27-4567-a0bc-860296edea61\") " pod="openshift-marketplace/certified-operators-js9xg"
Nov 21 16:21:21 crc kubenswrapper[4967]: I1121 16:21:21.021915 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eff34758-2a27-4567-a0bc-860296edea61-utilities\") pod \"certified-operators-js9xg\" (UID: \"eff34758-2a27-4567-a0bc-860296edea61\") " pod="openshift-marketplace/certified-operators-js9xg"
Nov 21 16:21:21 crc kubenswrapper[4967]: I1121 16:21:21.022011 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eff34758-2a27-4567-a0bc-860296edea61-catalog-content\") pod \"certified-operators-js9xg\" (UID: \"eff34758-2a27-4567-a0bc-860296edea61\") " pod="openshift-marketplace/certified-operators-js9xg"
Nov 21 16:21:21 crc kubenswrapper[4967]: I1121 16:21:21.124599 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkkrj\" (UniqueName: \"kubernetes.io/projected/eff34758-2a27-4567-a0bc-860296edea61-kube-api-access-mkkrj\") pod \"certified-operators-js9xg\" (UID: \"eff34758-2a27-4567-a0bc-860296edea61\") " pod="openshift-marketplace/certified-operators-js9xg"
Nov 21 16:21:21 crc kubenswrapper[4967]: I1121 16:21:21.125176 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eff34758-2a27-4567-a0bc-860296edea61-utilities\") pod \"certified-operators-js9xg\" (UID: \"eff34758-2a27-4567-a0bc-860296edea61\") " pod="openshift-marketplace/certified-operators-js9xg"
Nov 21 16:21:21 crc kubenswrapper[4967]: I1121 16:21:21.125378 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eff34758-2a27-4567-a0bc-860296edea61-catalog-content\") pod \"certified-operators-js9xg\" (UID: \"eff34758-2a27-4567-a0bc-860296edea61\") " pod="openshift-marketplace/certified-operators-js9xg"
Nov 21 16:21:21 crc kubenswrapper[4967]: I1121 16:21:21.125744 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eff34758-2a27-4567-a0bc-860296edea61-utilities\") pod \"certified-operators-js9xg\" (UID: \"eff34758-2a27-4567-a0bc-860296edea61\") " pod="openshift-marketplace/certified-operators-js9xg"
Nov 21 16:21:21 crc kubenswrapper[4967]: I1121 16:21:21.125949 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eff34758-2a27-4567-a0bc-860296edea61-catalog-content\") pod \"certified-operators-js9xg\" (UID: \"eff34758-2a27-4567-a0bc-860296edea61\") " pod="openshift-marketplace/certified-operators-js9xg"
Nov 21 16:21:21 crc kubenswrapper[4967]: I1121 16:21:21.146390 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkkrj\" (UniqueName: \"kubernetes.io/projected/eff34758-2a27-4567-a0bc-860296edea61-kube-api-access-mkkrj\") pod \"certified-operators-js9xg\" (UID: \"eff34758-2a27-4567-a0bc-860296edea61\") " pod="openshift-marketplace/certified-operators-js9xg"
Nov 21 16:21:21 crc kubenswrapper[4967]: I1121 16:21:21.241853 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-js9xg"
Nov 21 16:21:21 crc kubenswrapper[4967]: I1121 16:21:21.817799 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-js9xg"]
Nov 21 16:21:22 crc kubenswrapper[4967]: I1121 16:21:22.623987 4967 generic.go:334] "Generic (PLEG): container finished" podID="eff34758-2a27-4567-a0bc-860296edea61" containerID="10c224d683375efd17cf47cf21b642a94f586829a873cba3a957b234b905e8c9" exitCode=0
Nov 21 16:21:22 crc kubenswrapper[4967]: I1121 16:21:22.624101 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-js9xg" event={"ID":"eff34758-2a27-4567-a0bc-860296edea61","Type":"ContainerDied","Data":"10c224d683375efd17cf47cf21b642a94f586829a873cba3a957b234b905e8c9"}
Nov 21 16:21:22 crc kubenswrapper[4967]: I1121 16:21:22.624483 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-js9xg" event={"ID":"eff34758-2a27-4567-a0bc-860296edea61","Type":"ContainerStarted","Data":"e18c6abed7d77362ec7847db8fced0b23b419ed8f6423fce8870f16d54538e2b"}
Nov 21 16:21:24 crc kubenswrapper[4967]: I1121 16:21:24.649779 4967 generic.go:334] "Generic (PLEG): container finished" podID="eff34758-2a27-4567-a0bc-860296edea61" containerID="5e2afeaac41d7eec430b28bf39d4908dd614ca1d465026b90885ac562d7db81f" exitCode=0
Nov 21 16:21:24 crc kubenswrapper[4967]: I1121 16:21:24.649892 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-js9xg" event={"ID":"eff34758-2a27-4567-a0bc-860296edea61","Type":"ContainerDied","Data":"5e2afeaac41d7eec430b28bf39d4908dd614ca1d465026b90885ac562d7db81f"}
Nov 21 16:21:25 crc kubenswrapper[4967]: I1121 16:21:25.665651 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-js9xg" event={"ID":"eff34758-2a27-4567-a0bc-860296edea61","Type":"ContainerStarted","Data":"a605fbd8045593a150d6966431e52f7a766c1aa73823c54c55bc79839f33bd9b"}
Nov 21 16:21:25 crc kubenswrapper[4967]: I1121 16:21:25.690998 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-js9xg" podStartSLOduration=3.216089069 podStartE2EDuration="5.690966936s" podCreationTimestamp="2025-11-21 16:21:20 +0000 UTC" firstStartedPulling="2025-11-21 16:21:22.626945003 +0000 UTC m=+2770.885466031" lastFinishedPulling="2025-11-21 16:21:25.10182289 +0000 UTC m=+2773.360343898" observedRunningTime="2025-11-21 16:21:25.684845152 +0000 UTC m=+2773.943366150" watchObservedRunningTime="2025-11-21 16:21:25.690966936 +0000 UTC m=+2773.949487954"
Nov 21 16:21:26 crc kubenswrapper[4967]: I1121 16:21:26.667295 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-d25tg"]
Nov 21 16:21:26 crc kubenswrapper[4967]: I1121 16:21:26.670680 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d25tg"
Nov 21 16:21:26 crc kubenswrapper[4967]: I1121 16:21:26.682486 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d25tg"]
Nov 21 16:21:26 crc kubenswrapper[4967]: I1121 16:21:26.772059 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8-utilities\") pod \"community-operators-d25tg\" (UID: \"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8\") " pod="openshift-marketplace/community-operators-d25tg"
Nov 21 16:21:26 crc kubenswrapper[4967]: I1121 16:21:26.772350 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8-catalog-content\") pod \"community-operators-d25tg\" (UID: \"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8\") " pod="openshift-marketplace/community-operators-d25tg"
Nov 21 16:21:26 crc kubenswrapper[4967]: I1121 16:21:26.772547 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f26h5\" (UniqueName: \"kubernetes.io/projected/a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8-kube-api-access-f26h5\") pod \"community-operators-d25tg\" (UID: \"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8\") " pod="openshift-marketplace/community-operators-d25tg"
Nov 21 16:21:26 crc kubenswrapper[4967]: I1121 16:21:26.876143 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f26h5\" (UniqueName: \"kubernetes.io/projected/a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8-kube-api-access-f26h5\") pod \"community-operators-d25tg\" (UID: \"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8\") " pod="openshift-marketplace/community-operators-d25tg"
Nov 21 16:21:26 crc kubenswrapper[4967]: I1121 16:21:26.876327 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8-utilities\") pod \"community-operators-d25tg\" (UID: \"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8\") " pod="openshift-marketplace/community-operators-d25tg"
Nov 21 16:21:26 crc kubenswrapper[4967]: I1121 16:21:26.876433 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8-catalog-content\") pod \"community-operators-d25tg\" (UID: \"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8\") " pod="openshift-marketplace/community-operators-d25tg"
Nov 21 16:21:26 crc kubenswrapper[4967]: I1121 16:21:26.876907 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8-utilities\") pod \"community-operators-d25tg\" (UID: \"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8\") " pod="openshift-marketplace/community-operators-d25tg"
Nov 21 16:21:26 crc kubenswrapper[4967]: I1121 16:21:26.877077 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8-catalog-content\") pod \"community-operators-d25tg\" (UID: \"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8\") " pod="openshift-marketplace/community-operators-d25tg"
Nov 21 16:21:26 crc kubenswrapper[4967]: I1121 16:21:26.896746 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f26h5\" (UniqueName: \"kubernetes.io/projected/a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8-kube-api-access-f26h5\") pod \"community-operators-d25tg\" (UID: \"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8\") " pod="openshift-marketplace/community-operators-d25tg"
Nov 21 16:21:26 crc kubenswrapper[4967]: I1121 16:21:26.994257 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d25tg"
Nov 21 16:21:27 crc kubenswrapper[4967]: I1121 16:21:27.587111 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d25tg"]
Nov 21 16:21:27 crc kubenswrapper[4967]: W1121 16:21:27.589325 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2cb4ed6_ba6e_48ef_8c86_10ce0bcfe4c8.slice/crio-c2655555204ce3c004c5ae4548b2b5014b220383e6b85c52ed328000c61ac7dd WatchSource:0}: Error finding container c2655555204ce3c004c5ae4548b2b5014b220383e6b85c52ed328000c61ac7dd: Status 404 returned error can't find the container with id c2655555204ce3c004c5ae4548b2b5014b220383e6b85c52ed328000c61ac7dd
Nov 21 16:21:27 crc kubenswrapper[4967]: I1121 16:21:27.700734 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d25tg" event={"ID":"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8","Type":"ContainerStarted","Data":"c2655555204ce3c004c5ae4548b2b5014b220383e6b85c52ed328000c61ac7dd"}
Nov 21 16:21:28 crc kubenswrapper[4967]: I1121 16:21:28.712976 4967 generic.go:334] "Generic (PLEG): container finished" podID="a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8" containerID="c938d5531ca51ef9fff6536a0d7e998521921cc454e161c6aad20d5b5413bdd2" exitCode=0
Nov 21 16:21:28 crc kubenswrapper[4967]: I1121 16:21:28.713034 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d25tg" event={"ID":"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8","Type":"ContainerDied","Data":"c938d5531ca51ef9fff6536a0d7e998521921cc454e161c6aad20d5b5413bdd2"}
Nov 21 16:21:29 crc kubenswrapper[4967]: I1121 16:21:29.725253 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d25tg" event={"ID":"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8","Type":"ContainerStarted","Data":"f0bc6ea64f11267e8214aaa3a5514482bb7a82080b20bc4dc9b34c47d485a1e8"}
Nov 21 16:21:31 crc kubenswrapper[4967]: I1121 16:21:31.243945 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-js9xg"
Nov 21 16:21:31 crc kubenswrapper[4967]: I1121 16:21:31.244389 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-js9xg"
Nov 21 16:21:31 crc kubenswrapper[4967]: I1121 16:21:31.301496 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-js9xg"
Nov 21 16:21:31 crc kubenswrapper[4967]: I1121 16:21:31.804264 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-js9xg"
Nov 21 16:21:32 crc kubenswrapper[4967]: I1121 16:21:32.456339 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-js9xg"]
Nov 21 16:21:32 crc kubenswrapper[4967]: I1121 16:21:32.760453 4967 generic.go:334] "Generic (PLEG): container finished" podID="a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8" containerID="f0bc6ea64f11267e8214aaa3a5514482bb7a82080b20bc4dc9b34c47d485a1e8" exitCode=0
Nov 21 16:21:32 crc kubenswrapper[4967]: I1121 16:21:32.760842 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d25tg" event={"ID":"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8","Type":"ContainerDied","Data":"f0bc6ea64f11267e8214aaa3a5514482bb7a82080b20bc4dc9b34c47d485a1e8"}
Nov 21 16:21:33 crc kubenswrapper[4967]: I1121 16:21:33.772036 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d25tg" event={"ID":"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8","Type":"ContainerStarted","Data":"08dd40dc1cd3561f7db6320d70ade546933604f9700a4a542597f6102bb15141"}
Nov 21 16:21:33 crc kubenswrapper[4967]: I1121 16:21:33.772222 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-js9xg" podUID="eff34758-2a27-4567-a0bc-860296edea61" containerName="registry-server" containerID="cri-o://a605fbd8045593a150d6966431e52f7a766c1aa73823c54c55bc79839f33bd9b" gracePeriod=2
Nov 21 16:21:33 crc kubenswrapper[4967]: I1121 16:21:33.799054 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-d25tg" podStartSLOduration=3.112924784 podStartE2EDuration="7.799029682s" podCreationTimestamp="2025-11-21 16:21:26 +0000 UTC" firstStartedPulling="2025-11-21 16:21:28.715194795 +0000 UTC m=+2776.973715803" lastFinishedPulling="2025-11-21 16:21:33.401299693 +0000 UTC m=+2781.659820701" observedRunningTime="2025-11-21 16:21:33.794485223 +0000 UTC m=+2782.053006221" watchObservedRunningTime="2025-11-21 16:21:33.799029682 +0000 UTC m=+2782.057550690"
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.337906 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-js9xg"
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.477206 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkkrj\" (UniqueName: \"kubernetes.io/projected/eff34758-2a27-4567-a0bc-860296edea61-kube-api-access-mkkrj\") pod \"eff34758-2a27-4567-a0bc-860296edea61\" (UID: \"eff34758-2a27-4567-a0bc-860296edea61\") "
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.477262 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eff34758-2a27-4567-a0bc-860296edea61-utilities\") pod \"eff34758-2a27-4567-a0bc-860296edea61\" (UID: \"eff34758-2a27-4567-a0bc-860296edea61\") "
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.477303 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eff34758-2a27-4567-a0bc-860296edea61-catalog-content\") pod \"eff34758-2a27-4567-a0bc-860296edea61\" (UID: \"eff34758-2a27-4567-a0bc-860296edea61\") "
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.478199 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eff34758-2a27-4567-a0bc-860296edea61-utilities" (OuterVolumeSpecName: "utilities") pod "eff34758-2a27-4567-a0bc-860296edea61" (UID: "eff34758-2a27-4567-a0bc-860296edea61"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.483751 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eff34758-2a27-4567-a0bc-860296edea61-kube-api-access-mkkrj" (OuterVolumeSpecName: "kube-api-access-mkkrj") pod "eff34758-2a27-4567-a0bc-860296edea61" (UID: "eff34758-2a27-4567-a0bc-860296edea61"). InnerVolumeSpecName "kube-api-access-mkkrj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.524300 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eff34758-2a27-4567-a0bc-860296edea61-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eff34758-2a27-4567-a0bc-860296edea61" (UID: "eff34758-2a27-4567-a0bc-860296edea61"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.582599 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkkrj\" (UniqueName: \"kubernetes.io/projected/eff34758-2a27-4567-a0bc-860296edea61-kube-api-access-mkkrj\") on node \"crc\" DevicePath \"\""
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.582645 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eff34758-2a27-4567-a0bc-860296edea61-utilities\") on node \"crc\" DevicePath \"\""
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.582663 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eff34758-2a27-4567-a0bc-860296edea61-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.785466 4967 generic.go:334] "Generic (PLEG): container finished" podID="eff34758-2a27-4567-a0bc-860296edea61" containerID="a605fbd8045593a150d6966431e52f7a766c1aa73823c54c55bc79839f33bd9b" exitCode=0
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.785514 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-js9xg" event={"ID":"eff34758-2a27-4567-a0bc-860296edea61","Type":"ContainerDied","Data":"a605fbd8045593a150d6966431e52f7a766c1aa73823c54c55bc79839f33bd9b"}
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.785546 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-js9xg" event={"ID":"eff34758-2a27-4567-a0bc-860296edea61","Type":"ContainerDied","Data":"e18c6abed7d77362ec7847db8fced0b23b419ed8f6423fce8870f16d54538e2b"}
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.785548 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-js9xg"
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.785569 4967 scope.go:117] "RemoveContainer" containerID="a605fbd8045593a150d6966431e52f7a766c1aa73823c54c55bc79839f33bd9b"
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.811560 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-js9xg"]
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.817427 4967 scope.go:117] "RemoveContainer" containerID="5e2afeaac41d7eec430b28bf39d4908dd614ca1d465026b90885ac562d7db81f"
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.822526 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-js9xg"]
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.840285 4967 scope.go:117] "RemoveContainer" containerID="10c224d683375efd17cf47cf21b642a94f586829a873cba3a957b234b905e8c9"
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.895479 4967 scope.go:117] "RemoveContainer" containerID="a605fbd8045593a150d6966431e52f7a766c1aa73823c54c55bc79839f33bd9b"
Nov 21 16:21:34 crc kubenswrapper[4967]: E1121 16:21:34.896139 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a605fbd8045593a150d6966431e52f7a766c1aa73823c54c55bc79839f33bd9b\": container with ID starting with a605fbd8045593a150d6966431e52f7a766c1aa73823c54c55bc79839f33bd9b not found: ID does not exist" containerID="a605fbd8045593a150d6966431e52f7a766c1aa73823c54c55bc79839f33bd9b"
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.896186 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a605fbd8045593a150d6966431e52f7a766c1aa73823c54c55bc79839f33bd9b"} err="failed to get container status \"a605fbd8045593a150d6966431e52f7a766c1aa73823c54c55bc79839f33bd9b\": rpc error: code = NotFound desc = could not find container \"a605fbd8045593a150d6966431e52f7a766c1aa73823c54c55bc79839f33bd9b\": container with ID starting with a605fbd8045593a150d6966431e52f7a766c1aa73823c54c55bc79839f33bd9b not found: ID does not exist"
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.896215 4967 scope.go:117] "RemoveContainer" containerID="5e2afeaac41d7eec430b28bf39d4908dd614ca1d465026b90885ac562d7db81f"
Nov 21 16:21:34 crc kubenswrapper[4967]: E1121 16:21:34.897628 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e2afeaac41d7eec430b28bf39d4908dd614ca1d465026b90885ac562d7db81f\": container with ID starting with 5e2afeaac41d7eec430b28bf39d4908dd614ca1d465026b90885ac562d7db81f not found: ID does not exist" containerID="5e2afeaac41d7eec430b28bf39d4908dd614ca1d465026b90885ac562d7db81f"
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.897693 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e2afeaac41d7eec430b28bf39d4908dd614ca1d465026b90885ac562d7db81f"} err="failed to get container status \"5e2afeaac41d7eec430b28bf39d4908dd614ca1d465026b90885ac562d7db81f\": rpc error: code = NotFound desc = could not find container \"5e2afeaac41d7eec430b28bf39d4908dd614ca1d465026b90885ac562d7db81f\": container with ID starting with 5e2afeaac41d7eec430b28bf39d4908dd614ca1d465026b90885ac562d7db81f not found: ID does not exist"
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.897738 4967 scope.go:117] "RemoveContainer" containerID="10c224d683375efd17cf47cf21b642a94f586829a873cba3a957b234b905e8c9"
Nov 21 16:21:34 crc kubenswrapper[4967]: E1121 16:21:34.898160 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10c224d683375efd17cf47cf21b642a94f586829a873cba3a957b234b905e8c9\": container with ID starting with 10c224d683375efd17cf47cf21b642a94f586829a873cba3a957b234b905e8c9 not found: ID does not exist" containerID="10c224d683375efd17cf47cf21b642a94f586829a873cba3a957b234b905e8c9"
Nov 21 16:21:34 crc kubenswrapper[4967]: I1121 16:21:34.898204 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10c224d683375efd17cf47cf21b642a94f586829a873cba3a957b234b905e8c9"} err="failed to get container status \"10c224d683375efd17cf47cf21b642a94f586829a873cba3a957b234b905e8c9\": rpc error: code = NotFound desc = could not find container \"10c224d683375efd17cf47cf21b642a94f586829a873cba3a957b234b905e8c9\": container with ID starting with 10c224d683375efd17cf47cf21b642a94f586829a873cba3a957b234b905e8c9 not found: ID does not exist"
Nov 21 16:21:36 crc kubenswrapper[4967]: I1121 16:21:36.556102 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eff34758-2a27-4567-a0bc-860296edea61" path="/var/lib/kubelet/pods/eff34758-2a27-4567-a0bc-860296edea61/volumes"
Nov 21 16:21:36 crc kubenswrapper[4967]: I1121 16:21:36.995253 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-d25tg"
Nov 21 16:21:36 crc kubenswrapper[4967]: I1121 16:21:36.995341 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-d25tg"
Nov 21 16:21:37 crc kubenswrapper[4967]: I1121 16:21:37.055375 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-d25tg"
Nov 21 16:21:46 crc kubenswrapper[4967]: I1121 16:21:46.521981 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 21 16:21:46 crc kubenswrapper[4967]: I1121 16:21:46.522637 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 21 16:21:47 crc kubenswrapper[4967]: I1121 16:21:47.041646 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-d25tg"
Nov 21 16:21:47 crc kubenswrapper[4967]: I1121 16:21:47.091924 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d25tg"]
Nov 21 16:21:47 crc kubenswrapper[4967]: I1121 16:21:47.928306 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-d25tg" podUID="a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8" containerName="registry-server" containerID="cri-o://08dd40dc1cd3561f7db6320d70ade546933604f9700a4a542597f6102bb15141" gracePeriod=2
Nov 21 16:21:48 crc kubenswrapper[4967]: I1121 16:21:48.461545 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d25tg"
Nov 21 16:21:48 crc kubenswrapper[4967]: I1121 16:21:48.548254 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f26h5\" (UniqueName: \"kubernetes.io/projected/a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8-kube-api-access-f26h5\") pod \"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8\" (UID: \"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8\") "
Nov 21 16:21:48 crc kubenswrapper[4967]: I1121 16:21:48.548696 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8-utilities\") pod \"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8\" (UID: \"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8\") "
Nov 21 16:21:48 crc kubenswrapper[4967]: I1121 16:21:48.548888 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8-catalog-content\") pod \"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8\" (UID: \"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8\") "
Nov 21 16:21:48 crc kubenswrapper[4967]: I1121 16:21:48.549520 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8-utilities" (OuterVolumeSpecName: "utilities") pod "a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8" (UID: "a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 16:21:48 crc kubenswrapper[4967]: I1121 16:21:48.549692 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8-utilities\") on node \"crc\" DevicePath \"\""
Nov 21 16:21:48 crc kubenswrapper[4967]: I1121 16:21:48.563912 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8-kube-api-access-f26h5" (OuterVolumeSpecName: "kube-api-access-f26h5") pod "a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8" (UID: "a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8"). InnerVolumeSpecName "kube-api-access-f26h5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 16:21:48 crc kubenswrapper[4967]: I1121 16:21:48.604211 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8" (UID: "a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 16:21:48 crc kubenswrapper[4967]: I1121 16:21:48.652060 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 21 16:21:48 crc kubenswrapper[4967]: I1121 16:21:48.652104 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f26h5\" (UniqueName: \"kubernetes.io/projected/a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8-kube-api-access-f26h5\") on node \"crc\" DevicePath \"\""
Nov 21 16:21:48 crc kubenswrapper[4967]: I1121 16:21:48.941737 4967 generic.go:334] "Generic (PLEG): container finished" podID="a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8" containerID="08dd40dc1cd3561f7db6320d70ade546933604f9700a4a542597f6102bb15141" exitCode=0
Nov 21 16:21:48 crc kubenswrapper[4967]: I1121 16:21:48.941805 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d25tg" event={"ID":"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8","Type":"ContainerDied","Data":"08dd40dc1cd3561f7db6320d70ade546933604f9700a4a542597f6102bb15141"}
Nov 21 16:21:48 crc kubenswrapper[4967]: I1121 16:21:48.941850 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d25tg" event={"ID":"a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8","Type":"ContainerDied","Data":"c2655555204ce3c004c5ae4548b2b5014b220383e6b85c52ed328000c61ac7dd"}
Nov 21 16:21:48 crc kubenswrapper[4967]: I1121 16:21:48.941872 4967 scope.go:117] "RemoveContainer" containerID="08dd40dc1cd3561f7db6320d70ade546933604f9700a4a542597f6102bb15141"
Nov 21 16:21:48 crc kubenswrapper[4967]: I1121 16:21:48.942117 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d25tg"
Nov 21 16:21:49 crc kubenswrapper[4967]: I1121 16:21:49.016866 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d25tg"]
Nov 21 16:21:49 crc kubenswrapper[4967]: I1121 16:21:49.028342 4967 scope.go:117] "RemoveContainer" containerID="f0bc6ea64f11267e8214aaa3a5514482bb7a82080b20bc4dc9b34c47d485a1e8"
Nov 21 16:21:49 crc kubenswrapper[4967]: I1121 16:21:49.036652 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-d25tg"]
Nov 21 16:21:49 crc kubenswrapper[4967]: I1121 16:21:49.070320 4967 scope.go:117] "RemoveContainer" containerID="c938d5531ca51ef9fff6536a0d7e998521921cc454e161c6aad20d5b5413bdd2"
Nov 21 16:21:49 crc kubenswrapper[4967]: I1121 16:21:49.117625 4967 scope.go:117] "RemoveContainer" containerID="08dd40dc1cd3561f7db6320d70ade546933604f9700a4a542597f6102bb15141"
Nov 21 16:21:49 crc kubenswrapper[4967]: E1121 16:21:49.118128 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08dd40dc1cd3561f7db6320d70ade546933604f9700a4a542597f6102bb15141\": container with ID starting with 08dd40dc1cd3561f7db6320d70ade546933604f9700a4a542597f6102bb15141 not found: ID does not exist" containerID="08dd40dc1cd3561f7db6320d70ade546933604f9700a4a542597f6102bb15141"
Nov 21 16:21:49 crc kubenswrapper[4967]: I1121 16:21:49.118240 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08dd40dc1cd3561f7db6320d70ade546933604f9700a4a542597f6102bb15141"} err="failed to get container status \"08dd40dc1cd3561f7db6320d70ade546933604f9700a4a542597f6102bb15141\": rpc error: code = NotFound desc = could not find container \"08dd40dc1cd3561f7db6320d70ade546933604f9700a4a542597f6102bb15141\": container with ID starting with 08dd40dc1cd3561f7db6320d70ade546933604f9700a4a542597f6102bb15141 not found: ID does not exist"
Nov 21 16:21:49 crc kubenswrapper[4967]: I1121 16:21:49.118322 4967 scope.go:117] "RemoveContainer" containerID="f0bc6ea64f11267e8214aaa3a5514482bb7a82080b20bc4dc9b34c47d485a1e8"
Nov 21 16:21:49 crc kubenswrapper[4967]: E1121 16:21:49.118649 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f0bc6ea64f11267e8214aaa3a5514482bb7a82080b20bc4dc9b34c47d485a1e8\": container with ID starting with f0bc6ea64f11267e8214aaa3a5514482bb7a82080b20bc4dc9b34c47d485a1e8 not found: ID does not exist" containerID="f0bc6ea64f11267e8214aaa3a5514482bb7a82080b20bc4dc9b34c47d485a1e8"
Nov 21 16:21:49 crc kubenswrapper[4967]: I1121 16:21:49.118680 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0bc6ea64f11267e8214aaa3a5514482bb7a82080b20bc4dc9b34c47d485a1e8"} err="failed to get container status \"f0bc6ea64f11267e8214aaa3a5514482bb7a82080b20bc4dc9b34c47d485a1e8\": rpc error: code = NotFound desc = could not find container \"f0bc6ea64f11267e8214aaa3a5514482bb7a82080b20bc4dc9b34c47d485a1e8\": container with ID starting with f0bc6ea64f11267e8214aaa3a5514482bb7a82080b20bc4dc9b34c47d485a1e8 not found: ID does not exist"
Nov 21 16:21:49 crc kubenswrapper[4967]: I1121 16:21:49.118707 4967 scope.go:117] "RemoveContainer" containerID="c938d5531ca51ef9fff6536a0d7e998521921cc454e161c6aad20d5b5413bdd2"
Nov 21 16:21:49 crc kubenswrapper[4967]: E1121 16:21:49.119081 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c938d5531ca51ef9fff6536a0d7e998521921cc454e161c6aad20d5b5413bdd2\": container with ID starting with c938d5531ca51ef9fff6536a0d7e998521921cc454e161c6aad20d5b5413bdd2 not found: ID does not exist" containerID="c938d5531ca51ef9fff6536a0d7e998521921cc454e161c6aad20d5b5413bdd2"
Nov 21 16:21:49 crc kubenswrapper[4967]: I1121 16:21:49.120559 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c938d5531ca51ef9fff6536a0d7e998521921cc454e161c6aad20d5b5413bdd2"} err="failed to get container status \"c938d5531ca51ef9fff6536a0d7e998521921cc454e161c6aad20d5b5413bdd2\": rpc error: code = NotFound desc = could not find container \"c938d5531ca51ef9fff6536a0d7e998521921cc454e161c6aad20d5b5413bdd2\": container with ID starting with c938d5531ca51ef9fff6536a0d7e998521921cc454e161c6aad20d5b5413bdd2 not found: ID does not exist"
Nov 21 16:21:50 crc kubenswrapper[4967]: I1121 16:21:50.548570 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8" path="/var/lib/kubelet/pods/a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8/volumes"
Nov 21 16:21:53 crc kubenswrapper[4967]: I1121 16:21:53.995967 4967 generic.go:334] "Generic (PLEG): container finished" podID="81fb37a5-540d-440d-b0f7-3ba11bad7c42" containerID="70bbbb16d356ac03b51145d0029f71decb75b6291fb8dc9d58b38fdbdf1347ce" exitCode=0
Nov 21 16:21:53 crc kubenswrapper[4967]: I1121 16:21:53.996137 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69" event={"ID":"81fb37a5-540d-440d-b0f7-3ba11bad7c42","Type":"ContainerDied","Data":"70bbbb16d356ac03b51145d0029f71decb75b6291fb8dc9d58b38fdbdf1347ce"}
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.465981 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69"
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.618710 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ft5hp\" (UniqueName: \"kubernetes.io/projected/81fb37a5-540d-440d-b0f7-3ba11bad7c42-kube-api-access-ft5hp\") pod \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") "
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.618767 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ceilometer-compute-config-data-2\") pod \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") "
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.618836 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-telemetry-combined-ca-bundle\") pod \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") "
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.618914 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ssh-key\") pod \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") "
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.618937 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ceilometer-compute-config-data-1\") pod \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") "
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.619029 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-inventory\") pod \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") "
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.619120 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ceilometer-compute-config-data-0\") pod \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\" (UID: \"81fb37a5-540d-440d-b0f7-3ba11bad7c42\") "
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.624468 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81fb37a5-540d-440d-b0f7-3ba11bad7c42-kube-api-access-ft5hp" (OuterVolumeSpecName: "kube-api-access-ft5hp") pod "81fb37a5-540d-440d-b0f7-3ba11bad7c42" (UID: "81fb37a5-540d-440d-b0f7-3ba11bad7c42"). InnerVolumeSpecName "kube-api-access-ft5hp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.625043 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "81fb37a5-540d-440d-b0f7-3ba11bad7c42" (UID: "81fb37a5-540d-440d-b0f7-3ba11bad7c42"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.652379 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "81fb37a5-540d-440d-b0f7-3ba11bad7c42" (UID: "81fb37a5-540d-440d-b0f7-3ba11bad7c42"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.654801 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "81fb37a5-540d-440d-b0f7-3ba11bad7c42" (UID: "81fb37a5-540d-440d-b0f7-3ba11bad7c42"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.657510 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-inventory" (OuterVolumeSpecName: "inventory") pod "81fb37a5-540d-440d-b0f7-3ba11bad7c42" (UID: "81fb37a5-540d-440d-b0f7-3ba11bad7c42"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.657849 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "81fb37a5-540d-440d-b0f7-3ba11bad7c42" (UID: "81fb37a5-540d-440d-b0f7-3ba11bad7c42"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.660592 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "81fb37a5-540d-440d-b0f7-3ba11bad7c42" (UID: "81fb37a5-540d-440d-b0f7-3ba11bad7c42"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.724944 4967 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\""
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.724989 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ft5hp\" (UniqueName: \"kubernetes.io/projected/81fb37a5-540d-440d-b0f7-3ba11bad7c42-kube-api-access-ft5hp\") on node \"crc\" DevicePath \"\""
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.725003 4967 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\""
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.725015 4967 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.725027 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.725039 4967 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\""
Nov 21 16:21:55 crc kubenswrapper[4967]: I1121 16:21:55.725051 4967 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/81fb37a5-540d-440d-b0f7-3ba11bad7c42-inventory\") on node \"crc\" DevicePath \"\""
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.019220 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69" event={"ID":"81fb37a5-540d-440d-b0f7-3ba11bad7c42","Type":"ContainerDied","Data":"f9500739ddc89fbd48aa47e34529589b269e6cf5451d193b88431b9814096dbe"}
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.020054 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f9500739ddc89fbd48aa47e34529589b269e6cf5451d193b88431b9814096dbe"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.019288 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-6ls69"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.111495 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"]
Nov 21 16:21:56 crc kubenswrapper[4967]: E1121 16:21:56.111959 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8" containerName="extract-content"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.111976 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8" containerName="extract-content"
Nov 21 16:21:56 crc kubenswrapper[4967]: E1121 16:21:56.112001 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eff34758-2a27-4567-a0bc-860296edea61" containerName="extract-utilities"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.112010 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eff34758-2a27-4567-a0bc-860296edea61" containerName="extract-utilities"
Nov 21 16:21:56 crc kubenswrapper[4967]: E1121 16:21:56.112028 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81fb37a5-540d-440d-b0f7-3ba11bad7c42" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.112037 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="81fb37a5-540d-440d-b0f7-3ba11bad7c42" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Nov 21 16:21:56 crc kubenswrapper[4967]: E1121 16:21:56.112054 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eff34758-2a27-4567-a0bc-860296edea61" containerName="extract-content"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.112059 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eff34758-2a27-4567-a0bc-860296edea61" containerName="extract-content"
Nov 21 16:21:56 crc kubenswrapper[4967]: E1121 16:21:56.112075 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eff34758-2a27-4567-a0bc-860296edea61" containerName="registry-server"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.112081 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eff34758-2a27-4567-a0bc-860296edea61" containerName="registry-server"
Nov 21 16:21:56 crc kubenswrapper[4967]: E1121 16:21:56.112095 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8" containerName="extract-utilities"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.112103 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8" containerName="extract-utilities"
Nov 21 16:21:56 crc kubenswrapper[4967]: E1121 16:21:56.112114 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8" containerName="registry-server"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.112120 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8" containerName="registry-server"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.112387 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2cb4ed6-ba6e-48ef-8c86-10ce0bcfe4c8" containerName="registry-server"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.112401 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="eff34758-2a27-4567-a0bc-860296edea61" containerName="registry-server"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.112422 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="81fb37a5-540d-440d-b0f7-3ba11bad7c42" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.113179 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.122557 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-ipmi-config-data"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.122814 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.122945 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.123090 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.123274 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rn5c5"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.129015 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"]
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.145549 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.145623 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9pc5\" (UniqueName: \"kubernetes.io/projected/5f0377eb-356a-49e8-9919-765ca8e2fb52-kube-api-access-p9pc5\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.145671 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.145724 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.145770 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.145835 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.145890 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.247475 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.247530 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9pc5\" (UniqueName: \"kubernetes.io/projected/5f0377eb-356a-49e8-9919-765ca8e2fb52-kube-api-access-p9pc5\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.247562 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.247604 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.247649 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.247694 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.247738 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.252560 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ceilometer-ipmi-config-data-1\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.252580 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ssh-key\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.253152 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ceilometer-ipmi-config-data-2\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.253794 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-inventory\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.256270 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-telemetry-power-monitoring-combined-ca-bundle\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.263740 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ceilometer-ipmi-config-data-0\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.276014 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9pc5\" (UniqueName: \"kubernetes.io/projected/5f0377eb-356a-49e8-9919-765ca8e2fb52-kube-api-access-p9pc5\") pod \"telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.442007 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"
Nov 21 16:21:56 crc kubenswrapper[4967]: I1121 16:21:56.979206 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7"]
Nov 21 16:21:57 crc kubenswrapper[4967]: I1121 16:21:57.034241 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7" event={"ID":"5f0377eb-356a-49e8-9919-765ca8e2fb52","Type":"ContainerStarted","Data":"1935c69ccee821dfe3b5312a848f58dd46117e11fac3007e21b22a47b4d097fe"}
Nov 21 16:21:58 crc kubenswrapper[4967]: I1121 16:21:58.049358 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7" event={"ID":"5f0377eb-356a-49e8-9919-765ca8e2fb52","Type":"ContainerStarted","Data":"b5e53b400aabd4726d96036cff452b00a65a508cd12c862280f92891d3a26d29"}
Nov 21 16:21:58 crc kubenswrapper[4967]: I1121 16:21:58.074632 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7" podStartSLOduration=1.402030109 podStartE2EDuration="2.074602114s" podCreationTimestamp="2025-11-21 16:21:56 +0000 UTC" firstStartedPulling="2025-11-21 16:21:56.984059953 +0000 UTC m=+2805.242580981" lastFinishedPulling="2025-11-21 16:21:57.656631928 +0000 UTC m=+2805.915152986" observedRunningTime="2025-11-21 16:21:58.072592727 +0000 UTC m=+2806.331113735" watchObservedRunningTime="2025-11-21 16:21:58.074602114 +0000 UTC m=+2806.333123122"
Nov 21 16:22:16 crc kubenswrapper[4967]: I1121 16:22:16.522046 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 21 16:22:16 crc kubenswrapper[4967]: I1121 16:22:16.522664 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 21 16:22:46 crc kubenswrapper[4967]: I1121 16:22:46.522618 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 21 16:22:46 crc kubenswrapper[4967]: I1121 16:22:46.523250 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 21 16:22:46 crc kubenswrapper[4967]: I1121 16:22:46.523388 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2"
Nov 21 16:22:46 crc kubenswrapper[4967]: I1121 16:22:46.524139 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"46a318f4ee5716fb3c6255601721ef4b2e2a3c23833fc15562caae3d65cac0d5"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 21 16:22:46 crc kubenswrapper[4967]: I1121 16:22:46.524187 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://46a318f4ee5716fb3c6255601721ef4b2e2a3c23833fc15562caae3d65cac0d5" gracePeriod=600
Nov 21 16:22:47 crc kubenswrapper[4967]: I1121 16:22:47.577304 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="46a318f4ee5716fb3c6255601721ef4b2e2a3c23833fc15562caae3d65cac0d5" exitCode=0
Nov 21 16:22:47 crc kubenswrapper[4967]: I1121 16:22:47.577368 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"46a318f4ee5716fb3c6255601721ef4b2e2a3c23833fc15562caae3d65cac0d5"}
Nov 21 16:22:47 crc kubenswrapper[4967]: I1121 16:22:47.578212 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"}
Nov 21 16:22:47 crc kubenswrapper[4967]: I1121 16:22:47.578238 4967 scope.go:117] "RemoveContainer" containerID="dc80059c22bd0ac36b94a62cc887cfbf22720d3031bfdc4b7a14e4b7cad815c8"
Nov 21 16:23:57 crc kubenswrapper[4967]: I1121 16:23:57.355140 4967 generic.go:334] "Generic (PLEG): container finished" podID="5f0377eb-356a-49e8-9919-765ca8e2fb52" containerID="b5e53b400aabd4726d96036cff452b00a65a508cd12c862280f92891d3a26d29" exitCode=0
Nov 21 16:23:57 crc kubenswrapper[4967]: I1121 16:23:57.355220 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7" event={"ID":"5f0377eb-356a-49e8-9919-765ca8e2fb52","Type":"ContainerDied","Data":"b5e53b400aabd4726d96036cff452b00a65a508cd12c862280f92891d3a26d29"}
Nov 21 16:23:58 crc kubenswrapper[4967]: I1121 16:23:58.915890
4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7" Nov 21 16:23:58 crc kubenswrapper[4967]: I1121 16:23:58.971040 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ceilometer-ipmi-config-data-0\") pod \"5f0377eb-356a-49e8-9919-765ca8e2fb52\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " Nov 21 16:23:58 crc kubenswrapper[4967]: I1121 16:23:58.971937 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ceilometer-ipmi-config-data-2\") pod \"5f0377eb-356a-49e8-9919-765ca8e2fb52\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " Nov 21 16:23:58 crc kubenswrapper[4967]: I1121 16:23:58.971987 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ssh-key\") pod \"5f0377eb-356a-49e8-9919-765ca8e2fb52\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " Nov 21 16:23:58 crc kubenswrapper[4967]: I1121 16:23:58.972040 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-inventory\") pod \"5f0377eb-356a-49e8-9919-765ca8e2fb52\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " Nov 21 16:23:58 crc kubenswrapper[4967]: I1121 16:23:58.972090 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9pc5\" (UniqueName: \"kubernetes.io/projected/5f0377eb-356a-49e8-9919-765ca8e2fb52-kube-api-access-p9pc5\") pod \"5f0377eb-356a-49e8-9919-765ca8e2fb52\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " Nov 21 16:23:58 crc kubenswrapper[4967]: I1121 16:23:58.982947 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f0377eb-356a-49e8-9919-765ca8e2fb52-kube-api-access-p9pc5" (OuterVolumeSpecName: "kube-api-access-p9pc5") pod "5f0377eb-356a-49e8-9919-765ca8e2fb52" (UID: "5f0377eb-356a-49e8-9919-765ca8e2fb52"). InnerVolumeSpecName "kube-api-access-p9pc5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.012828 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ceilometer-ipmi-config-data-2" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-2") pod "5f0377eb-356a-49e8-9919-765ca8e2fb52" (UID: "5f0377eb-356a-49e8-9919-765ca8e2fb52"). InnerVolumeSpecName "ceilometer-ipmi-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.018971 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ceilometer-ipmi-config-data-0" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-0") pod "5f0377eb-356a-49e8-9919-765ca8e2fb52" (UID: "5f0377eb-356a-49e8-9919-765ca8e2fb52"). InnerVolumeSpecName "ceilometer-ipmi-config-data-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.020348 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-inventory" (OuterVolumeSpecName: "inventory") pod "5f0377eb-356a-49e8-9919-765ca8e2fb52" (UID: "5f0377eb-356a-49e8-9919-765ca8e2fb52"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.021542 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5f0377eb-356a-49e8-9919-765ca8e2fb52" (UID: "5f0377eb-356a-49e8-9919-765ca8e2fb52"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.073246 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ceilometer-ipmi-config-data-1\") pod \"5f0377eb-356a-49e8-9919-765ca8e2fb52\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.073287 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-telemetry-power-monitoring-combined-ca-bundle\") pod \"5f0377eb-356a-49e8-9919-765ca8e2fb52\" (UID: \"5f0377eb-356a-49e8-9919-765ca8e2fb52\") " Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.073836 4967 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-0\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ceilometer-ipmi-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.073861 4967 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-2\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ceilometer-ipmi-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.073876 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.073888 4967 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.073900 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9pc5\" (UniqueName: \"kubernetes.io/projected/5f0377eb-356a-49e8-9919-765ca8e2fb52-kube-api-access-p9pc5\") on node \"crc\" DevicePath \"\"" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.077893 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "5f0377eb-356a-49e8-9919-765ca8e2fb52" (UID: "5f0377eb-356a-49e8-9919-765ca8e2fb52"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.175919 4967 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.327090 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ceilometer-ipmi-config-data-1" (OuterVolumeSpecName: "ceilometer-ipmi-config-data-1") pod "5f0377eb-356a-49e8-9919-765ca8e2fb52" (UID: "5f0377eb-356a-49e8-9919-765ca8e2fb52"). InnerVolumeSpecName "ceilometer-ipmi-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.380453 4967 reconciler_common.go:293] "Volume detached for volume \"ceilometer-ipmi-config-data-1\" (UniqueName: \"kubernetes.io/secret/5f0377eb-356a-49e8-9919-765ca8e2fb52-ceilometer-ipmi-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.384052 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7" event={"ID":"5f0377eb-356a-49e8-9919-765ca8e2fb52","Type":"ContainerDied","Data":"1935c69ccee821dfe3b5312a848f58dd46117e11fac3007e21b22a47b4d097fe"} Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.384089 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1935c69ccee821dfe3b5312a848f58dd46117e11fac3007e21b22a47b4d097fe" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.384193 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.572684 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp"] Nov 21 16:23:59 crc kubenswrapper[4967]: E1121 16:23:59.573282 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f0377eb-356a-49e8-9919-765ca8e2fb52" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.573324 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f0377eb-356a-49e8-9919-765ca8e2fb52" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.573582 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f0377eb-356a-49e8-9919-765ca8e2fb52" containerName="telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.574499 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.582857 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.583087 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.583301 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.583436 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-rn5c5" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.593172 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"logging-compute-config-data" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.625503 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp"] Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.696970 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmxv5\" (UniqueName: \"kubernetes.io/projected/e8d33040-61f0-4a55-9df6-cfa0b1513c43-kube-api-access-vmxv5\") pod \"logging-edpm-deployment-openstack-edpm-ipam-6knkp\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.697174 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-6knkp\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.697272 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-6knkp\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.697439 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-6knkp\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.697637 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-6knkp\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.800228 4967 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-6knkp\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.800333 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-6knkp\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.800419 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-6knkp\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.800455 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmxv5\" (UniqueName: \"kubernetes.io/projected/e8d33040-61f0-4a55-9df6-cfa0b1513c43-kube-api-access-vmxv5\") pod \"logging-edpm-deployment-openstack-edpm-ipam-6knkp\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.800536 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-6knkp\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.806225 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-logging-compute-config-data-0\") pod \"logging-edpm-deployment-openstack-edpm-ipam-6knkp\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.806406 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-inventory\") pod \"logging-edpm-deployment-openstack-edpm-ipam-6knkp\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.810800 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-ssh-key\") pod \"logging-edpm-deployment-openstack-edpm-ipam-6knkp\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.811011 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-compute-config-data-1\" (UniqueName: 
\"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-logging-compute-config-data-1\") pod \"logging-edpm-deployment-openstack-edpm-ipam-6knkp\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.821915 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmxv5\" (UniqueName: \"kubernetes.io/projected/e8d33040-61f0-4a55-9df6-cfa0b1513c43-kube-api-access-vmxv5\") pod \"logging-edpm-deployment-openstack-edpm-ipam-6knkp\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" Nov 21 16:23:59 crc kubenswrapper[4967]: I1121 16:23:59.926259 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" Nov 21 16:24:00 crc kubenswrapper[4967]: I1121 16:24:00.502715 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp"] Nov 21 16:24:01 crc kubenswrapper[4967]: I1121 16:24:01.407867 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" event={"ID":"e8d33040-61f0-4a55-9df6-cfa0b1513c43","Type":"ContainerStarted","Data":"36bcab8ba3cbd54af91b2c1ad5fdfbcdc66a5ab869d66ce8c31f93bb877558cf"} Nov 21 16:24:01 crc kubenswrapper[4967]: I1121 16:24:01.408583 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" event={"ID":"e8d33040-61f0-4a55-9df6-cfa0b1513c43","Type":"ContainerStarted","Data":"43d1ca7314be7c043e879aa8bcfe79fec0e0974b843e800bda01847240d459ff"} Nov 21 16:24:01 crc kubenswrapper[4967]: I1121 16:24:01.440977 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" podStartSLOduration=2.020135872 podStartE2EDuration="2.440955946s" podCreationTimestamp="2025-11-21 16:23:59 +0000 UTC" firstStartedPulling="2025-11-21 16:24:00.507102339 +0000 UTC m=+2928.765623347" lastFinishedPulling="2025-11-21 16:24:00.927922403 +0000 UTC m=+2929.186443421" observedRunningTime="2025-11-21 16:24:01.423795527 +0000 UTC m=+2929.682316535" watchObservedRunningTime="2025-11-21 16:24:01.440955946 +0000 UTC m=+2929.699476954" Nov 21 16:24:17 crc kubenswrapper[4967]: I1121 16:24:17.584074 4967 generic.go:334] "Generic (PLEG): container finished" podID="e8d33040-61f0-4a55-9df6-cfa0b1513c43" containerID="36bcab8ba3cbd54af91b2c1ad5fdfbcdc66a5ab869d66ce8c31f93bb877558cf" exitCode=0 Nov 21 16:24:17 crc kubenswrapper[4967]: I1121 16:24:17.584159 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" event={"ID":"e8d33040-61f0-4a55-9df6-cfa0b1513c43","Type":"ContainerDied","Data":"36bcab8ba3cbd54af91b2c1ad5fdfbcdc66a5ab869d66ce8c31f93bb877558cf"} Nov 21 16:24:19 crc kubenswrapper[4967]: I1121 16:24:19.612563 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" event={"ID":"e8d33040-61f0-4a55-9df6-cfa0b1513c43","Type":"ContainerDied","Data":"43d1ca7314be7c043e879aa8bcfe79fec0e0974b843e800bda01847240d459ff"} Nov 21 16:24:19 crc kubenswrapper[4967]: I1121 16:24:19.613140 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="43d1ca7314be7c043e879aa8bcfe79fec0e0974b843e800bda01847240d459ff" 
Nov 21 16:24:19 crc kubenswrapper[4967]: I1121 16:24:19.675100 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" Nov 21 16:24:19 crc kubenswrapper[4967]: I1121 16:24:19.794656 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-logging-compute-config-data-1\") pod \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " Nov 21 16:24:19 crc kubenswrapper[4967]: I1121 16:24:19.794955 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmxv5\" (UniqueName: \"kubernetes.io/projected/e8d33040-61f0-4a55-9df6-cfa0b1513c43-kube-api-access-vmxv5\") pod \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " Nov 21 16:24:19 crc kubenswrapper[4967]: I1121 16:24:19.795044 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-logging-compute-config-data-0\") pod \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " Nov 21 16:24:19 crc kubenswrapper[4967]: I1121 16:24:19.795770 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-inventory\") pod \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " Nov 21 16:24:19 crc kubenswrapper[4967]: I1121 16:24:19.795872 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-ssh-key\") pod \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\" (UID: \"e8d33040-61f0-4a55-9df6-cfa0b1513c43\") " Nov 21 16:24:19 crc kubenswrapper[4967]: I1121 16:24:19.803243 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8d33040-61f0-4a55-9df6-cfa0b1513c43-kube-api-access-vmxv5" (OuterVolumeSpecName: "kube-api-access-vmxv5") pod "e8d33040-61f0-4a55-9df6-cfa0b1513c43" (UID: "e8d33040-61f0-4a55-9df6-cfa0b1513c43"). InnerVolumeSpecName "kube-api-access-vmxv5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:24:19 crc kubenswrapper[4967]: I1121 16:24:19.834712 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e8d33040-61f0-4a55-9df6-cfa0b1513c43" (UID: "e8d33040-61f0-4a55-9df6-cfa0b1513c43"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:24:19 crc kubenswrapper[4967]: I1121 16:24:19.836861 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-inventory" (OuterVolumeSpecName: "inventory") pod "e8d33040-61f0-4a55-9df6-cfa0b1513c43" (UID: "e8d33040-61f0-4a55-9df6-cfa0b1513c43"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:24:19 crc kubenswrapper[4967]: I1121 16:24:19.837595 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-logging-compute-config-data-1" (OuterVolumeSpecName: "logging-compute-config-data-1") pod "e8d33040-61f0-4a55-9df6-cfa0b1513c43" (UID: "e8d33040-61f0-4a55-9df6-cfa0b1513c43"). InnerVolumeSpecName "logging-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:24:19 crc kubenswrapper[4967]: I1121 16:24:19.840241 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-logging-compute-config-data-0" (OuterVolumeSpecName: "logging-compute-config-data-0") pod "e8d33040-61f0-4a55-9df6-cfa0b1513c43" (UID: "e8d33040-61f0-4a55-9df6-cfa0b1513c43"). InnerVolumeSpecName "logging-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:24:19 crc kubenswrapper[4967]: I1121 16:24:19.900090 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmxv5\" (UniqueName: \"kubernetes.io/projected/e8d33040-61f0-4a55-9df6-cfa0b1513c43-kube-api-access-vmxv5\") on node \"crc\" DevicePath \"\"" Nov 21 16:24:19 crc kubenswrapper[4967]: I1121 16:24:19.900147 4967 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-logging-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 21 16:24:19 crc kubenswrapper[4967]: I1121 16:24:19.900161 4967 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 16:24:19 crc kubenswrapper[4967]: I1121 16:24:19.900172 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 16:24:19 crc kubenswrapper[4967]: I1121 16:24:19.900183 4967 reconciler_common.go:293] "Volume detached for volume \"logging-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e8d33040-61f0-4a55-9df6-cfa0b1513c43-logging-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 21 16:24:20 crc kubenswrapper[4967]: I1121 16:24:20.621360 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/logging-edpm-deployment-openstack-edpm-ipam-6knkp" Nov 21 16:24:46 crc kubenswrapper[4967]: I1121 16:24:46.522775 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:24:46 crc kubenswrapper[4967]: I1121 16:24:46.523477 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:25:16 crc kubenswrapper[4967]: I1121 16:25:16.522216 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:25:16 crc kubenswrapper[4967]: I1121 16:25:16.522844 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:25:46 crc kubenswrapper[4967]: I1121 16:25:46.522006 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:25:46 crc kubenswrapper[4967]: I1121 16:25:46.522649 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:25:46 crc kubenswrapper[4967]: I1121 16:25:46.522703 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 16:25:46 crc kubenswrapper[4967]: I1121 16:25:46.523676 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 16:25:46 crc kubenswrapper[4967]: I1121 16:25:46.523728 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01" gracePeriod=600 Nov 21 16:25:47 crc kubenswrapper[4967]: E1121 16:25:47.166438 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:25:47 crc kubenswrapper[4967]: I1121 16:25:47.579877 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01" exitCode=0 Nov 21 16:25:47 crc kubenswrapper[4967]: I1121 16:25:47.579928 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"} Nov 21 16:25:47 crc kubenswrapper[4967]: I1121 16:25:47.579970 4967 scope.go:117] "RemoveContainer" containerID="46a318f4ee5716fb3c6255601721ef4b2e2a3c23833fc15562caae3d65cac0d5" Nov 21 16:25:47 crc kubenswrapper[4967]: I1121 16:25:47.580791 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01" Nov 21 16:25:47 crc kubenswrapper[4967]: E1121 16:25:47.581129 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:26:00 crc kubenswrapper[4967]: I1121 16:26:00.536727 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01" Nov 21 16:26:00 crc kubenswrapper[4967]: E1121 16:26:00.538424 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:26:11 crc kubenswrapper[4967]: I1121 16:26:11.537058 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01" Nov 21 16:26:11 crc kubenswrapper[4967]: E1121 16:26:11.537870 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:26:22 crc kubenswrapper[4967]: I1121 16:26:22.544622 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01" Nov 21 16:26:22 crc kubenswrapper[4967]: E1121 16:26:22.545370 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:26:36 crc kubenswrapper[4967]: I1121 16:26:36.536496 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01" Nov 21 16:26:36 crc kubenswrapper[4967]: E1121 16:26:36.537328 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:26:49 crc kubenswrapper[4967]: I1121 16:26:49.536821 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01" Nov 21 16:26:49 crc kubenswrapper[4967]: E1121 16:26:49.537698 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:27:00 crc kubenswrapper[4967]: I1121 16:27:00.537679 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01" Nov 21 16:27:00 crc kubenswrapper[4967]: E1121 16:27:00.538695 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.485493 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-g46ws"] Nov 21 16:27:01 crc kubenswrapper[4967]: E1121 16:27:01.486427 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8d33040-61f0-4a55-9df6-cfa0b1513c43" containerName="logging-edpm-deployment-openstack-edpm-ipam" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.486450 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8d33040-61f0-4a55-9df6-cfa0b1513c43" containerName="logging-edpm-deployment-openstack-edpm-ipam" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.486754 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8d33040-61f0-4a55-9df6-cfa0b1513c43" containerName="logging-edpm-deployment-openstack-edpm-ipam" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.488654 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g46ws" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.516226 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-g46ws"] Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.590245 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c69f1528-41e1-43f9-aa3d-13d1a2754af0-catalog-content\") pod \"redhat-marketplace-g46ws\" (UID: \"c69f1528-41e1-43f9-aa3d-13d1a2754af0\") " pod="openshift-marketplace/redhat-marketplace-g46ws" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.590435 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c69f1528-41e1-43f9-aa3d-13d1a2754af0-utilities\") pod \"redhat-marketplace-g46ws\" (UID: \"c69f1528-41e1-43f9-aa3d-13d1a2754af0\") " pod="openshift-marketplace/redhat-marketplace-g46ws" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.590493 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgld9\" (UniqueName: \"kubernetes.io/projected/c69f1528-41e1-43f9-aa3d-13d1a2754af0-kube-api-access-rgld9\") pod \"redhat-marketplace-g46ws\" (UID: \"c69f1528-41e1-43f9-aa3d-13d1a2754af0\") " pod="openshift-marketplace/redhat-marketplace-g46ws" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.678821 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-m8h7b"] Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.681531 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-m8h7b" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.694109 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c69f1528-41e1-43f9-aa3d-13d1a2754af0-catalog-content\") pod \"redhat-marketplace-g46ws\" (UID: \"c69f1528-41e1-43f9-aa3d-13d1a2754af0\") " pod="openshift-marketplace/redhat-marketplace-g46ws" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.694662 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c69f1528-41e1-43f9-aa3d-13d1a2754af0-catalog-content\") pod \"redhat-marketplace-g46ws\" (UID: \"c69f1528-41e1-43f9-aa3d-13d1a2754af0\") " pod="openshift-marketplace/redhat-marketplace-g46ws" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.695156 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c69f1528-41e1-43f9-aa3d-13d1a2754af0-utilities\") pod \"redhat-marketplace-g46ws\" (UID: \"c69f1528-41e1-43f9-aa3d-13d1a2754af0\") " pod="openshift-marketplace/redhat-marketplace-g46ws" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.695496 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c69f1528-41e1-43f9-aa3d-13d1a2754af0-utilities\") pod \"redhat-marketplace-g46ws\" (UID: \"c69f1528-41e1-43f9-aa3d-13d1a2754af0\") " pod="openshift-marketplace/redhat-marketplace-g46ws" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.695555 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-rgld9\" (UniqueName: \"kubernetes.io/projected/c69f1528-41e1-43f9-aa3d-13d1a2754af0-kube-api-access-rgld9\") pod \"redhat-marketplace-g46ws\" (UID: \"c69f1528-41e1-43f9-aa3d-13d1a2754af0\") " pod="openshift-marketplace/redhat-marketplace-g46ws" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.703013 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-m8h7b"] Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.731255 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgld9\" (UniqueName: \"kubernetes.io/projected/c69f1528-41e1-43f9-aa3d-13d1a2754af0-kube-api-access-rgld9\") pod \"redhat-marketplace-g46ws\" (UID: \"c69f1528-41e1-43f9-aa3d-13d1a2754af0\") " pod="openshift-marketplace/redhat-marketplace-g46ws" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.798938 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c811fdf-c898-4093-b919-c2b59d7f596e-catalog-content\") pod \"redhat-operators-m8h7b\" (UID: \"1c811fdf-c898-4093-b919-c2b59d7f596e\") " pod="openshift-marketplace/redhat-operators-m8h7b" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.799544 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwrhm\" (UniqueName: \"kubernetes.io/projected/1c811fdf-c898-4093-b919-c2b59d7f596e-kube-api-access-xwrhm\") pod \"redhat-operators-m8h7b\" (UID: \"1c811fdf-c898-4093-b919-c2b59d7f596e\") " pod="openshift-marketplace/redhat-operators-m8h7b" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.800029 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c811fdf-c898-4093-b919-c2b59d7f596e-utilities\") pod \"redhat-operators-m8h7b\" (UID: \"1c811fdf-c898-4093-b919-c2b59d7f596e\") " pod="openshift-marketplace/redhat-operators-m8h7b" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.816532 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g46ws" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.901948 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwrhm\" (UniqueName: \"kubernetes.io/projected/1c811fdf-c898-4093-b919-c2b59d7f596e-kube-api-access-xwrhm\") pod \"redhat-operators-m8h7b\" (UID: \"1c811fdf-c898-4093-b919-c2b59d7f596e\") " pod="openshift-marketplace/redhat-operators-m8h7b" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.902045 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c811fdf-c898-4093-b919-c2b59d7f596e-utilities\") pod \"redhat-operators-m8h7b\" (UID: \"1c811fdf-c898-4093-b919-c2b59d7f596e\") " pod="openshift-marketplace/redhat-operators-m8h7b" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.902148 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c811fdf-c898-4093-b919-c2b59d7f596e-catalog-content\") pod \"redhat-operators-m8h7b\" (UID: \"1c811fdf-c898-4093-b919-c2b59d7f596e\") " pod="openshift-marketplace/redhat-operators-m8h7b" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.903004 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c811fdf-c898-4093-b919-c2b59d7f596e-utilities\") pod \"redhat-operators-m8h7b\" (UID: \"1c811fdf-c898-4093-b919-c2b59d7f596e\") " pod="openshift-marketplace/redhat-operators-m8h7b" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.903004 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c811fdf-c898-4093-b919-c2b59d7f596e-catalog-content\") pod \"redhat-operators-m8h7b\" (UID: \"1c811fdf-c898-4093-b919-c2b59d7f596e\") " pod="openshift-marketplace/redhat-operators-m8h7b" Nov 21 16:27:01 crc kubenswrapper[4967]: I1121 16:27:01.920857 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwrhm\" (UniqueName: \"kubernetes.io/projected/1c811fdf-c898-4093-b919-c2b59d7f596e-kube-api-access-xwrhm\") pod \"redhat-operators-m8h7b\" (UID: \"1c811fdf-c898-4093-b919-c2b59d7f596e\") " pod="openshift-marketplace/redhat-operators-m8h7b" Nov 21 16:27:02 crc kubenswrapper[4967]: I1121 16:27:02.014431 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-m8h7b"
Nov 21 16:27:02 crc kubenswrapper[4967]: I1121 16:27:02.392903 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-g46ws"]
Nov 21 16:27:02 crc kubenswrapper[4967]: I1121 16:27:02.611478 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-m8h7b"]
Nov 21 16:27:02 crc kubenswrapper[4967]: W1121 16:27:02.647429 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1c811fdf_c898_4093_b919_c2b59d7f596e.slice/crio-5bf9da7fce1d0b0681cb30ecf23cd70ad96341ffa1d51e9e732b27c37def2612 WatchSource:0}: Error finding container 5bf9da7fce1d0b0681cb30ecf23cd70ad96341ffa1d51e9e732b27c37def2612: Status 404 returned error can't find the container with id 5bf9da7fce1d0b0681cb30ecf23cd70ad96341ffa1d51e9e732b27c37def2612
Nov 21 16:27:02 crc kubenswrapper[4967]: I1121 16:27:02.692119 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m8h7b" event={"ID":"1c811fdf-c898-4093-b919-c2b59d7f596e","Type":"ContainerStarted","Data":"5bf9da7fce1d0b0681cb30ecf23cd70ad96341ffa1d51e9e732b27c37def2612"}
Nov 21 16:27:02 crc kubenswrapper[4967]: I1121 16:27:02.709227 4967 generic.go:334] "Generic (PLEG): container finished" podID="c69f1528-41e1-43f9-aa3d-13d1a2754af0" containerID="d0d210a43309eb82ace5ff824bd506c3799994308b24524d7d15d566f1d47b14" exitCode=0
Nov 21 16:27:02 crc kubenswrapper[4967]: I1121 16:27:02.709301 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g46ws" event={"ID":"c69f1528-41e1-43f9-aa3d-13d1a2754af0","Type":"ContainerDied","Data":"d0d210a43309eb82ace5ff824bd506c3799994308b24524d7d15d566f1d47b14"}
Nov 21 16:27:02 crc kubenswrapper[4967]: I1121 16:27:02.709357 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g46ws" event={"ID":"c69f1528-41e1-43f9-aa3d-13d1a2754af0","Type":"ContainerStarted","Data":"4967d9f33e7a061af377452745913a4bb237a346515d88009e772f80f7e98e3f"}
Nov 21 16:27:02 crc kubenswrapper[4967]: I1121 16:27:02.717742 4967 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 21 16:27:03 crc kubenswrapper[4967]: I1121 16:27:03.722478 4967 generic.go:334] "Generic (PLEG): container finished" podID="1c811fdf-c898-4093-b919-c2b59d7f596e" containerID="69a077ea516f8a95f3cec66b03d8e14dd77b0da8a4ead74f3139015bc1bedb43" exitCode=0
Nov 21 16:27:03 crc kubenswrapper[4967]: I1121 16:27:03.722616 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m8h7b" event={"ID":"1c811fdf-c898-4093-b919-c2b59d7f596e","Type":"ContainerDied","Data":"69a077ea516f8a95f3cec66b03d8e14dd77b0da8a4ead74f3139015bc1bedb43"}
Nov 21 16:27:04 crc kubenswrapper[4967]: I1121 16:27:04.739067 4967 generic.go:334] "Generic (PLEG): container finished" podID="c69f1528-41e1-43f9-aa3d-13d1a2754af0" containerID="c1f4f6f70840dec8c66e688f1d32a1d9fbe03f5ee5ca743c02e9e9759d16a074" exitCode=0
Nov 21 16:27:04 crc kubenswrapper[4967]: I1121 16:27:04.739123 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g46ws" event={"ID":"c69f1528-41e1-43f9-aa3d-13d1a2754af0","Type":"ContainerDied","Data":"c1f4f6f70840dec8c66e688f1d32a1d9fbe03f5ee5ca743c02e9e9759d16a074"}
Nov 21 16:27:05 crc kubenswrapper[4967]: I1121 16:27:05.751845 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m8h7b" event={"ID":"1c811fdf-c898-4093-b919-c2b59d7f596e","Type":"ContainerStarted","Data":"87063666499b14d77d3b8a18b17d45694eba21e11d3f097523ce9135ad3e5541"}
Nov 21 16:27:05 crc kubenswrapper[4967]: I1121 16:27:05.754087 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g46ws" event={"ID":"c69f1528-41e1-43f9-aa3d-13d1a2754af0","Type":"ContainerStarted","Data":"bb7caeea67a47c05487e653422183e84f0cd822626747d2d02c008678a83cbd9"}
Nov 21 16:27:05 crc kubenswrapper[4967]: I1121 16:27:05.800401 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-g46ws" podStartSLOduration=2.416143864 podStartE2EDuration="4.800382043s" podCreationTimestamp="2025-11-21 16:27:01 +0000 UTC" firstStartedPulling="2025-11-21 16:27:02.717496505 +0000 UTC m=+3110.976017513" lastFinishedPulling="2025-11-21 16:27:05.101734684 +0000 UTC m=+3113.360255692" observedRunningTime="2025-11-21 16:27:05.790915193 +0000 UTC m=+3114.049436211" watchObservedRunningTime="2025-11-21 16:27:05.800382043 +0000 UTC m=+3114.058903051"
Nov 21 16:27:06 crc kubenswrapper[4967]: I1121 16:27:06.767841 4967 generic.go:334] "Generic (PLEG): container finished" podID="1c811fdf-c898-4093-b919-c2b59d7f596e" containerID="87063666499b14d77d3b8a18b17d45694eba21e11d3f097523ce9135ad3e5541" exitCode=0
Nov 21 16:27:06 crc kubenswrapper[4967]: I1121 16:27:06.767902 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m8h7b" event={"ID":"1c811fdf-c898-4093-b919-c2b59d7f596e","Type":"ContainerDied","Data":"87063666499b14d77d3b8a18b17d45694eba21e11d3f097523ce9135ad3e5541"}
Nov 21 16:27:08 crc kubenswrapper[4967]: I1121 16:27:08.789735 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m8h7b" event={"ID":"1c811fdf-c898-4093-b919-c2b59d7f596e","Type":"ContainerStarted","Data":"6454999cd112f2cbbb5c3e5fc2159ad6101d95a7e907ffa325e51ed4a55517c5"}
Nov 21 16:27:08 crc kubenswrapper[4967]: I1121 16:27:08.815841 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-m8h7b" podStartSLOduration=3.453361379 podStartE2EDuration="7.815822418s" podCreationTimestamp="2025-11-21 16:27:01 +0000 UTC" firstStartedPulling="2025-11-21 16:27:03.724837908 +0000 UTC m=+3111.983358916" lastFinishedPulling="2025-11-21 16:27:08.087298947 +0000 UTC m=+3116.345819955" observedRunningTime="2025-11-21 16:27:08.812808832 +0000 UTC m=+3117.071329840" watchObservedRunningTime="2025-11-21 16:27:08.815822418 +0000 UTC m=+3117.074343436"
Nov 21 16:27:11 crc kubenswrapper[4967]: I1121 16:27:11.818296 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-g46ws"
Nov 21 16:27:11 crc kubenswrapper[4967]: I1121 16:27:11.818946 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-g46ws"
Nov 21 16:27:11 crc kubenswrapper[4967]: I1121 16:27:11.888099 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-g46ws"
Nov 21 16:27:12 crc kubenswrapper[4967]: I1121 16:27:12.014918 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-m8h7b"
Nov 21 16:27:12 crc kubenswrapper[4967]: I1121 16:27:12.014972 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-m8h7b"
Nov 21 16:27:12 crc kubenswrapper[4967]: I1121 16:27:12.549991 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"
Nov 21 16:27:12 crc kubenswrapper[4967]: E1121 16:27:12.551283 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 16:27:12 crc kubenswrapper[4967]: I1121 16:27:12.883783 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-g46ws"
Nov 21 16:27:13 crc kubenswrapper[4967]: I1121 16:27:13.062329 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-m8h7b" podUID="1c811fdf-c898-4093-b919-c2b59d7f596e" containerName="registry-server" probeResult="failure" output=<
Nov 21 16:27:13 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s
Nov 21 16:27:13 crc kubenswrapper[4967]: >
Nov 21 16:27:13 crc kubenswrapper[4967]: I1121 16:27:13.269906 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-g46ws"]
Nov 21 16:27:14 crc kubenswrapper[4967]: I1121 16:27:14.853683 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-g46ws" podUID="c69f1528-41e1-43f9-aa3d-13d1a2754af0" containerName="registry-server" containerID="cri-o://bb7caeea67a47c05487e653422183e84f0cd822626747d2d02c008678a83cbd9" gracePeriod=2
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.386290 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g46ws"
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.463358 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgld9\" (UniqueName: \"kubernetes.io/projected/c69f1528-41e1-43f9-aa3d-13d1a2754af0-kube-api-access-rgld9\") pod \"c69f1528-41e1-43f9-aa3d-13d1a2754af0\" (UID: \"c69f1528-41e1-43f9-aa3d-13d1a2754af0\") "
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.463697 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c69f1528-41e1-43f9-aa3d-13d1a2754af0-catalog-content\") pod \"c69f1528-41e1-43f9-aa3d-13d1a2754af0\" (UID: \"c69f1528-41e1-43f9-aa3d-13d1a2754af0\") "
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.463860 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c69f1528-41e1-43f9-aa3d-13d1a2754af0-utilities\") pod \"c69f1528-41e1-43f9-aa3d-13d1a2754af0\" (UID: \"c69f1528-41e1-43f9-aa3d-13d1a2754af0\") "
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.465461 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c69f1528-41e1-43f9-aa3d-13d1a2754af0-utilities" (OuterVolumeSpecName: "utilities") pod "c69f1528-41e1-43f9-aa3d-13d1a2754af0" (UID: "c69f1528-41e1-43f9-aa3d-13d1a2754af0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.476174 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c69f1528-41e1-43f9-aa3d-13d1a2754af0-kube-api-access-rgld9" (OuterVolumeSpecName: "kube-api-access-rgld9") pod "c69f1528-41e1-43f9-aa3d-13d1a2754af0" (UID: "c69f1528-41e1-43f9-aa3d-13d1a2754af0"). InnerVolumeSpecName "kube-api-access-rgld9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.488150 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c69f1528-41e1-43f9-aa3d-13d1a2754af0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c69f1528-41e1-43f9-aa3d-13d1a2754af0" (UID: "c69f1528-41e1-43f9-aa3d-13d1a2754af0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.567741 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rgld9\" (UniqueName: \"kubernetes.io/projected/c69f1528-41e1-43f9-aa3d-13d1a2754af0-kube-api-access-rgld9\") on node \"crc\" DevicePath \"\""
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.567784 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c69f1528-41e1-43f9-aa3d-13d1a2754af0-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.567793 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c69f1528-41e1-43f9-aa3d-13d1a2754af0-utilities\") on node \"crc\" DevicePath \"\""
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.874268 4967 generic.go:334] "Generic (PLEG): container finished" podID="c69f1528-41e1-43f9-aa3d-13d1a2754af0" containerID="bb7caeea67a47c05487e653422183e84f0cd822626747d2d02c008678a83cbd9" exitCode=0
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.874336 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g46ws" event={"ID":"c69f1528-41e1-43f9-aa3d-13d1a2754af0","Type":"ContainerDied","Data":"bb7caeea67a47c05487e653422183e84f0cd822626747d2d02c008678a83cbd9"}
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.874374 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g46ws" event={"ID":"c69f1528-41e1-43f9-aa3d-13d1a2754af0","Type":"ContainerDied","Data":"4967d9f33e7a061af377452745913a4bb237a346515d88009e772f80f7e98e3f"}
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.874375 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g46ws"
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.874398 4967 scope.go:117] "RemoveContainer" containerID="bb7caeea67a47c05487e653422183e84f0cd822626747d2d02c008678a83cbd9"
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.908991 4967 scope.go:117] "RemoveContainer" containerID="c1f4f6f70840dec8c66e688f1d32a1d9fbe03f5ee5ca743c02e9e9759d16a074"
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.917655 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-g46ws"]
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.927594 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-g46ws"]
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.945643 4967 scope.go:117] "RemoveContainer" containerID="d0d210a43309eb82ace5ff824bd506c3799994308b24524d7d15d566f1d47b14"
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.991690 4967 scope.go:117] "RemoveContainer" containerID="bb7caeea67a47c05487e653422183e84f0cd822626747d2d02c008678a83cbd9"
Nov 21 16:27:15 crc kubenswrapper[4967]: E1121 16:27:15.992422 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb7caeea67a47c05487e653422183e84f0cd822626747d2d02c008678a83cbd9\": container with ID starting with bb7caeea67a47c05487e653422183e84f0cd822626747d2d02c008678a83cbd9 not found: ID does not exist" containerID="bb7caeea67a47c05487e653422183e84f0cd822626747d2d02c008678a83cbd9"
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.992464 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb7caeea67a47c05487e653422183e84f0cd822626747d2d02c008678a83cbd9"} err="failed to get container status \"bb7caeea67a47c05487e653422183e84f0cd822626747d2d02c008678a83cbd9\": rpc error: code = NotFound desc = could not find container \"bb7caeea67a47c05487e653422183e84f0cd822626747d2d02c008678a83cbd9\": container with ID starting with bb7caeea67a47c05487e653422183e84f0cd822626747d2d02c008678a83cbd9 not found: ID does not exist"
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.992505 4967 scope.go:117] "RemoveContainer" containerID="c1f4f6f70840dec8c66e688f1d32a1d9fbe03f5ee5ca743c02e9e9759d16a074"
Nov 21 16:27:15 crc kubenswrapper[4967]: E1121 16:27:15.992809 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1f4f6f70840dec8c66e688f1d32a1d9fbe03f5ee5ca743c02e9e9759d16a074\": container with ID starting with c1f4f6f70840dec8c66e688f1d32a1d9fbe03f5ee5ca743c02e9e9759d16a074 not found: ID does not exist" containerID="c1f4f6f70840dec8c66e688f1d32a1d9fbe03f5ee5ca743c02e9e9759d16a074"
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.992834 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1f4f6f70840dec8c66e688f1d32a1d9fbe03f5ee5ca743c02e9e9759d16a074"} err="failed to get container status \"c1f4f6f70840dec8c66e688f1d32a1d9fbe03f5ee5ca743c02e9e9759d16a074\": rpc error: code = NotFound desc = could not find container \"c1f4f6f70840dec8c66e688f1d32a1d9fbe03f5ee5ca743c02e9e9759d16a074\": container with ID starting with c1f4f6f70840dec8c66e688f1d32a1d9fbe03f5ee5ca743c02e9e9759d16a074 not found: ID does not exist"
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.992852 4967 scope.go:117] "RemoveContainer" containerID="d0d210a43309eb82ace5ff824bd506c3799994308b24524d7d15d566f1d47b14"
Nov 21 16:27:15 crc kubenswrapper[4967]: E1121 16:27:15.993602 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0d210a43309eb82ace5ff824bd506c3799994308b24524d7d15d566f1d47b14\": container with ID starting with d0d210a43309eb82ace5ff824bd506c3799994308b24524d7d15d566f1d47b14 not found: ID does not exist" containerID="d0d210a43309eb82ace5ff824bd506c3799994308b24524d7d15d566f1d47b14"
Nov 21 16:27:15 crc kubenswrapper[4967]: I1121 16:27:15.993625 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0d210a43309eb82ace5ff824bd506c3799994308b24524d7d15d566f1d47b14"} err="failed to get container status \"d0d210a43309eb82ace5ff824bd506c3799994308b24524d7d15d566f1d47b14\": rpc error: code = NotFound desc = could not find container \"d0d210a43309eb82ace5ff824bd506c3799994308b24524d7d15d566f1d47b14\": container with ID starting with d0d210a43309eb82ace5ff824bd506c3799994308b24524d7d15d566f1d47b14 not found: ID does not exist"
Nov 21 16:27:16 crc kubenswrapper[4967]: I1121 16:27:16.552649 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c69f1528-41e1-43f9-aa3d-13d1a2754af0" path="/var/lib/kubelet/pods/c69f1528-41e1-43f9-aa3d-13d1a2754af0/volumes"
Nov 21 16:27:22 crc kubenswrapper[4967]: I1121 16:27:22.069910 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-m8h7b"
Nov 21 16:27:22 crc kubenswrapper[4967]: I1121 16:27:22.119077 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-m8h7b"
Nov 21 16:27:22 crc kubenswrapper[4967]: I1121 16:27:22.315491 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-m8h7b"]
Nov 21 16:27:23 crc kubenswrapper[4967]: I1121 16:27:23.969554 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-m8h7b" podUID="1c811fdf-c898-4093-b919-c2b59d7f596e" containerName="registry-server" containerID="cri-o://6454999cd112f2cbbb5c3e5fc2159ad6101d95a7e907ffa325e51ed4a55517c5" gracePeriod=2
Nov 21 16:27:24 crc kubenswrapper[4967]: I1121 16:27:24.491517 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-m8h7b"
Nov 21 16:27:24 crc kubenswrapper[4967]: I1121 16:27:24.614003 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwrhm\" (UniqueName: \"kubernetes.io/projected/1c811fdf-c898-4093-b919-c2b59d7f596e-kube-api-access-xwrhm\") pod \"1c811fdf-c898-4093-b919-c2b59d7f596e\" (UID: \"1c811fdf-c898-4093-b919-c2b59d7f596e\") "
Nov 21 16:27:24 crc kubenswrapper[4967]: I1121 16:27:24.614100 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c811fdf-c898-4093-b919-c2b59d7f596e-catalog-content\") pod \"1c811fdf-c898-4093-b919-c2b59d7f596e\" (UID: \"1c811fdf-c898-4093-b919-c2b59d7f596e\") "
Nov 21 16:27:24 crc kubenswrapper[4967]: I1121 16:27:24.614121 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c811fdf-c898-4093-b919-c2b59d7f596e-utilities\") pod \"1c811fdf-c898-4093-b919-c2b59d7f596e\" (UID: \"1c811fdf-c898-4093-b919-c2b59d7f596e\") "
Nov 21 16:27:24 crc kubenswrapper[4967]: I1121 16:27:24.615220 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c811fdf-c898-4093-b919-c2b59d7f596e-utilities" (OuterVolumeSpecName: "utilities") pod "1c811fdf-c898-4093-b919-c2b59d7f596e" (UID: "1c811fdf-c898-4093-b919-c2b59d7f596e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 16:27:24 crc kubenswrapper[4967]: I1121 16:27:24.615728 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c811fdf-c898-4093-b919-c2b59d7f596e-utilities\") on node \"crc\" DevicePath \"\""
Nov 21 16:27:24 crc kubenswrapper[4967]: I1121 16:27:24.621661 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c811fdf-c898-4093-b919-c2b59d7f596e-kube-api-access-xwrhm" (OuterVolumeSpecName: "kube-api-access-xwrhm") pod "1c811fdf-c898-4093-b919-c2b59d7f596e" (UID: "1c811fdf-c898-4093-b919-c2b59d7f596e"). InnerVolumeSpecName "kube-api-access-xwrhm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 16:27:24 crc kubenswrapper[4967]: I1121 16:27:24.712412 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c811fdf-c898-4093-b919-c2b59d7f596e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1c811fdf-c898-4093-b919-c2b59d7f596e" (UID: "1c811fdf-c898-4093-b919-c2b59d7f596e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 16:27:24 crc kubenswrapper[4967]: I1121 16:27:24.718102 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwrhm\" (UniqueName: \"kubernetes.io/projected/1c811fdf-c898-4093-b919-c2b59d7f596e-kube-api-access-xwrhm\") on node \"crc\" DevicePath \"\""
Nov 21 16:27:24 crc kubenswrapper[4967]: I1121 16:27:24.718140 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c811fdf-c898-4093-b919-c2b59d7f596e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 21 16:27:24 crc kubenswrapper[4967]: I1121 16:27:24.982837 4967 generic.go:334] "Generic (PLEG): container finished" podID="1c811fdf-c898-4093-b919-c2b59d7f596e" containerID="6454999cd112f2cbbb5c3e5fc2159ad6101d95a7e907ffa325e51ed4a55517c5" exitCode=0
Nov 21 16:27:24 crc kubenswrapper[4967]: I1121 16:27:24.982884 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m8h7b" event={"ID":"1c811fdf-c898-4093-b919-c2b59d7f596e","Type":"ContainerDied","Data":"6454999cd112f2cbbb5c3e5fc2159ad6101d95a7e907ffa325e51ed4a55517c5"}
Nov 21 16:27:24 crc kubenswrapper[4967]: I1121 16:27:24.982913 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-m8h7b" event={"ID":"1c811fdf-c898-4093-b919-c2b59d7f596e","Type":"ContainerDied","Data":"5bf9da7fce1d0b0681cb30ecf23cd70ad96341ffa1d51e9e732b27c37def2612"}
Nov 21 16:27:24 crc kubenswrapper[4967]: I1121 16:27:24.982933 4967 scope.go:117] "RemoveContainer" containerID="6454999cd112f2cbbb5c3e5fc2159ad6101d95a7e907ffa325e51ed4a55517c5"
Nov 21 16:27:24 crc kubenswrapper[4967]: I1121 16:27:24.983080 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-m8h7b"
Nov 21 16:27:25 crc kubenswrapper[4967]: I1121 16:27:25.020046 4967 scope.go:117] "RemoveContainer" containerID="87063666499b14d77d3b8a18b17d45694eba21e11d3f097523ce9135ad3e5541"
Nov 21 16:27:25 crc kubenswrapper[4967]: I1121 16:27:25.026308 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-m8h7b"]
Nov 21 16:27:25 crc kubenswrapper[4967]: I1121 16:27:25.049738 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-m8h7b"]
Nov 21 16:27:25 crc kubenswrapper[4967]: I1121 16:27:25.055496 4967 scope.go:117] "RemoveContainer" containerID="69a077ea516f8a95f3cec66b03d8e14dd77b0da8a4ead74f3139015bc1bedb43"
Nov 21 16:27:25 crc kubenswrapper[4967]: I1121 16:27:25.114426 4967 scope.go:117] "RemoveContainer" containerID="6454999cd112f2cbbb5c3e5fc2159ad6101d95a7e907ffa325e51ed4a55517c5"
Nov 21 16:27:25 crc kubenswrapper[4967]: E1121 16:27:25.114890 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6454999cd112f2cbbb5c3e5fc2159ad6101d95a7e907ffa325e51ed4a55517c5\": container with ID starting with 6454999cd112f2cbbb5c3e5fc2159ad6101d95a7e907ffa325e51ed4a55517c5 not found: ID does not exist" containerID="6454999cd112f2cbbb5c3e5fc2159ad6101d95a7e907ffa325e51ed4a55517c5"
Nov 21 16:27:25 crc kubenswrapper[4967]: I1121 16:27:25.114938 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6454999cd112f2cbbb5c3e5fc2159ad6101d95a7e907ffa325e51ed4a55517c5"} err="failed to get container status \"6454999cd112f2cbbb5c3e5fc2159ad6101d95a7e907ffa325e51ed4a55517c5\": rpc error: code = NotFound desc = could not find container \"6454999cd112f2cbbb5c3e5fc2159ad6101d95a7e907ffa325e51ed4a55517c5\": container with ID starting with 6454999cd112f2cbbb5c3e5fc2159ad6101d95a7e907ffa325e51ed4a55517c5 not found: ID does not exist"
Nov 21 16:27:25 crc kubenswrapper[4967]: I1121 16:27:25.114966 4967 scope.go:117] "RemoveContainer" containerID="87063666499b14d77d3b8a18b17d45694eba21e11d3f097523ce9135ad3e5541"
Nov 21 16:27:25 crc kubenswrapper[4967]: E1121 16:27:25.115496 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87063666499b14d77d3b8a18b17d45694eba21e11d3f097523ce9135ad3e5541\": container with ID starting with 87063666499b14d77d3b8a18b17d45694eba21e11d3f097523ce9135ad3e5541 not found: ID does not exist" containerID="87063666499b14d77d3b8a18b17d45694eba21e11d3f097523ce9135ad3e5541"
Nov 21 16:27:25 crc kubenswrapper[4967]: I1121 16:27:25.115633 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87063666499b14d77d3b8a18b17d45694eba21e11d3f097523ce9135ad3e5541"} err="failed to get container status \"87063666499b14d77d3b8a18b17d45694eba21e11d3f097523ce9135ad3e5541\": rpc error: code = NotFound desc = could not find container \"87063666499b14d77d3b8a18b17d45694eba21e11d3f097523ce9135ad3e5541\": container with ID starting with 87063666499b14d77d3b8a18b17d45694eba21e11d3f097523ce9135ad3e5541 not found: ID does not exist"
Nov 21 16:27:25 crc kubenswrapper[4967]: I1121 16:27:25.115666 4967 scope.go:117] "RemoveContainer" containerID="69a077ea516f8a95f3cec66b03d8e14dd77b0da8a4ead74f3139015bc1bedb43"
Nov 21 16:27:25 crc kubenswrapper[4967]: E1121 16:27:25.116138 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69a077ea516f8a95f3cec66b03d8e14dd77b0da8a4ead74f3139015bc1bedb43\": container with ID starting with 69a077ea516f8a95f3cec66b03d8e14dd77b0da8a4ead74f3139015bc1bedb43 not found: ID does not exist" containerID="69a077ea516f8a95f3cec66b03d8e14dd77b0da8a4ead74f3139015bc1bedb43"
Nov 21 16:27:25 crc kubenswrapper[4967]: I1121 16:27:25.116157 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69a077ea516f8a95f3cec66b03d8e14dd77b0da8a4ead74f3139015bc1bedb43"} err="failed to get container status \"69a077ea516f8a95f3cec66b03d8e14dd77b0da8a4ead74f3139015bc1bedb43\": rpc error: code = NotFound desc = could not find container \"69a077ea516f8a95f3cec66b03d8e14dd77b0da8a4ead74f3139015bc1bedb43\": container with ID starting with 69a077ea516f8a95f3cec66b03d8e14dd77b0da8a4ead74f3139015bc1bedb43 not found: ID does not exist"
Nov 21 16:27:25 crc kubenswrapper[4967]: I1121 16:27:25.537672 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"
Nov 21 16:27:25 crc kubenswrapper[4967]: E1121 16:27:25.537977 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 16:27:26 crc kubenswrapper[4967]: I1121 16:27:26.556393 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c811fdf-c898-4093-b919-c2b59d7f596e" path="/var/lib/kubelet/pods/1c811fdf-c898-4093-b919-c2b59d7f596e/volumes"
Nov 21 16:27:37 crc kubenswrapper[4967]: I1121 16:27:37.536199 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"
Nov 21 16:27:37 crc kubenswrapper[4967]: E1121 16:27:37.537060 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 16:27:49 crc kubenswrapper[4967]: I1121 16:27:49.537624 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"
Nov 21 16:27:49 crc kubenswrapper[4967]: E1121 16:27:49.538391 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 16:28:03 crc kubenswrapper[4967]: I1121 16:28:03.536487 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"
Nov 21 16:28:03 crc kubenswrapper[4967]: E1121 16:28:03.537422 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 16:28:15 crc kubenswrapper[4967]: I1121 16:28:15.536520 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"
Nov 21 16:28:15 crc kubenswrapper[4967]: E1121 16:28:15.537268 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 16:28:26 crc kubenswrapper[4967]: I1121 16:28:26.536477 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"
Nov 21 16:28:26 crc kubenswrapper[4967]: E1121 16:28:26.537295 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 16:28:39 crc kubenswrapper[4967]: I1121 16:28:39.537522 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"
Nov 21 16:28:39 crc kubenswrapper[4967]: E1121 16:28:39.538832 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 16:28:50 crc kubenswrapper[4967]: I1121 16:28:50.536556 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"
Nov 21 16:28:50 crc kubenswrapper[4967]: E1121 16:28:50.537593 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 16:29:01 crc kubenswrapper[4967]: I1121 16:29:01.536634 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"
Nov 21 16:29:01 crc kubenswrapper[4967]: E1121 16:29:01.537499 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 16:29:12 crc kubenswrapper[4967]: I1121 16:29:12.544795 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"
Nov 21 16:29:12 crc kubenswrapper[4967]: E1121 16:29:12.545785 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 16:29:20 crc kubenswrapper[4967]: I1121 16:29:20.348365 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-794fb7d789-mkxk2" podUID="9488c46d-11de-4819-9784-e32e3893a5d9" containerName="proxy-server" probeResult="failure" output="HTTP probe failed with statuscode: 502"
Nov 21 16:29:23 crc kubenswrapper[4967]: I1121 16:29:23.536085 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"
Nov 21 16:29:23 crc kubenswrapper[4967]: E1121 16:29:23.537019 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 16:29:38 crc kubenswrapper[4967]: I1121 16:29:38.536852 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"
Nov 21 16:29:38 crc kubenswrapper[4967]: E1121 16:29:38.537802 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 16:29:52 crc kubenswrapper[4967]: I1121 16:29:52.546481 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"
Nov 21 16:29:52 crc kubenswrapper[4967]: E1121 16:29:52.554385 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.206558 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8"]
Nov 21 16:30:01 crc kubenswrapper[4967]: E1121 16:30:01.209769 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c69f1528-41e1-43f9-aa3d-13d1a2754af0" containerName="extract-utilities"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.209789 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="c69f1528-41e1-43f9-aa3d-13d1a2754af0" containerName="extract-utilities"
Nov 21 16:30:01 crc kubenswrapper[4967]: E1121 16:30:01.209817 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c811fdf-c898-4093-b919-c2b59d7f596e" containerName="registry-server"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.209823 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c811fdf-c898-4093-b919-c2b59d7f596e" containerName="registry-server"
Nov 21 16:30:01 crc kubenswrapper[4967]: E1121 16:30:01.209832 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c811fdf-c898-4093-b919-c2b59d7f596e" containerName="extract-utilities"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.209839 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c811fdf-c898-4093-b919-c2b59d7f596e" containerName="extract-utilities"
Nov 21 16:30:01 crc kubenswrapper[4967]: E1121 16:30:01.209883 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c69f1528-41e1-43f9-aa3d-13d1a2754af0" containerName="extract-content"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.209888 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="c69f1528-41e1-43f9-aa3d-13d1a2754af0" containerName="extract-content"
Nov 21 16:30:01 crc kubenswrapper[4967]: E1121 16:30:01.209902 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c69f1528-41e1-43f9-aa3d-13d1a2754af0" containerName="registry-server"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.209908 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="c69f1528-41e1-43f9-aa3d-13d1a2754af0" containerName="registry-server"
Nov 21 16:30:01 crc kubenswrapper[4967]: E1121 16:30:01.209918 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c811fdf-c898-4093-b919-c2b59d7f596e" containerName="extract-content"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.209924 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c811fdf-c898-4093-b919-c2b59d7f596e" containerName="extract-content"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.210216 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c811fdf-c898-4093-b919-c2b59d7f596e" containerName="registry-server"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.210247 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="c69f1528-41e1-43f9-aa3d-13d1a2754af0" containerName="registry-server"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.211539 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.216465 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.217073 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.253070 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8"]
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.335451 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7739352c-5106-4aec-b99a-2eab9c577078-config-volume\") pod \"collect-profiles-29395710-ffvb8\" (UID: \"7739352c-5106-4aec-b99a-2eab9c577078\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.335942 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbz74\" (UniqueName: \"kubernetes.io/projected/7739352c-5106-4aec-b99a-2eab9c577078-kube-api-access-jbz74\") pod \"collect-profiles-29395710-ffvb8\" (UID: \"7739352c-5106-4aec-b99a-2eab9c577078\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.336066 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7739352c-5106-4aec-b99a-2eab9c577078-secret-volume\") pod \"collect-profiles-29395710-ffvb8\" (UID: \"7739352c-5106-4aec-b99a-2eab9c577078\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.438041 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbz74\" (UniqueName: \"kubernetes.io/projected/7739352c-5106-4aec-b99a-2eab9c577078-kube-api-access-jbz74\") pod \"collect-profiles-29395710-ffvb8\" (UID: \"7739352c-5106-4aec-b99a-2eab9c577078\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.438188 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7739352c-5106-4aec-b99a-2eab9c577078-secret-volume\") pod \"collect-profiles-29395710-ffvb8\" (UID: \"7739352c-5106-4aec-b99a-2eab9c577078\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.438429 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7739352c-5106-4aec-b99a-2eab9c577078-config-volume\") pod \"collect-profiles-29395710-ffvb8\" (UID: \"7739352c-5106-4aec-b99a-2eab9c577078\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.439333 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7739352c-5106-4aec-b99a-2eab9c577078-config-volume\") pod \"collect-profiles-29395710-ffvb8\" (UID: \"7739352c-5106-4aec-b99a-2eab9c577078\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.445396 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7739352c-5106-4aec-b99a-2eab9c577078-secret-volume\") pod \"collect-profiles-29395710-ffvb8\" (UID: \"7739352c-5106-4aec-b99a-2eab9c577078\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.455430 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbz74\" (UniqueName: \"kubernetes.io/projected/7739352c-5106-4aec-b99a-2eab9c577078-kube-api-access-jbz74\") pod \"collect-profiles-29395710-ffvb8\" (UID: \"7739352c-5106-4aec-b99a-2eab9c577078\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8"
Nov 21 16:30:01 crc kubenswrapper[4967]: I1121 16:30:01.549804 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8"
Nov 21 16:30:02 crc kubenswrapper[4967]: I1121 16:30:02.178124 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8"]
Nov 21 16:30:03 crc kubenswrapper[4967]: I1121 16:30:03.202126 4967 generic.go:334] "Generic (PLEG): container finished" podID="7739352c-5106-4aec-b99a-2eab9c577078" containerID="8c9255165734ec39ce45f9515dd6ac8d5132bc78f32a54b8865a9d7ac0dc3614" exitCode=0
Nov 21 16:30:03 crc kubenswrapper[4967]: I1121 16:30:03.202214 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8" event={"ID":"7739352c-5106-4aec-b99a-2eab9c577078","Type":"ContainerDied","Data":"8c9255165734ec39ce45f9515dd6ac8d5132bc78f32a54b8865a9d7ac0dc3614"}
Nov 21 16:30:03 crc kubenswrapper[4967]: I1121 16:30:03.202454 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8" event={"ID":"7739352c-5106-4aec-b99a-2eab9c577078","Type":"ContainerStarted","Data":"ca8590b425fc46158fbf98e0e66874b395d3be0851dbbd5be98e2de12af6590e"}
Nov 21 16:30:04 crc kubenswrapper[4967]: I1121 16:30:04.686979 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8"
Nov 21 16:30:04 crc kubenswrapper[4967]: I1121 16:30:04.721513 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7739352c-5106-4aec-b99a-2eab9c577078-secret-volume\") pod \"7739352c-5106-4aec-b99a-2eab9c577078\" (UID: \"7739352c-5106-4aec-b99a-2eab9c577078\") "
Nov 21 16:30:04 crc kubenswrapper[4967]: I1121 16:30:04.721702 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7739352c-5106-4aec-b99a-2eab9c577078-config-volume\") pod \"7739352c-5106-4aec-b99a-2eab9c577078\" (UID: \"7739352c-5106-4aec-b99a-2eab9c577078\") "
Nov 21 16:30:04 crc kubenswrapper[4967]: I1121 16:30:04.721815 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jbz74\" (UniqueName: \"kubernetes.io/projected/7739352c-5106-4aec-b99a-2eab9c577078-kube-api-access-jbz74\") pod \"7739352c-5106-4aec-b99a-2eab9c577078\" (UID: \"7739352c-5106-4aec-b99a-2eab9c577078\") "
Nov 21 16:30:04 crc kubenswrapper[4967]: I1121 16:30:04.722599 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7739352c-5106-4aec-b99a-2eab9c577078-config-volume" (OuterVolumeSpecName: "config-volume") pod "7739352c-5106-4aec-b99a-2eab9c577078" (UID: "7739352c-5106-4aec-b99a-2eab9c577078"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 16:30:04 crc kubenswrapper[4967]: I1121 16:30:04.728922 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7739352c-5106-4aec-b99a-2eab9c577078-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "7739352c-5106-4aec-b99a-2eab9c577078" (UID: "7739352c-5106-4aec-b99a-2eab9c577078"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 16:30:04 crc kubenswrapper[4967]: I1121 16:30:04.729048 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7739352c-5106-4aec-b99a-2eab9c577078-kube-api-access-jbz74" (OuterVolumeSpecName: "kube-api-access-jbz74") pod "7739352c-5106-4aec-b99a-2eab9c577078" (UID: "7739352c-5106-4aec-b99a-2eab9c577078"). InnerVolumeSpecName "kube-api-access-jbz74". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 16:30:04 crc kubenswrapper[4967]: I1121 16:30:04.825658 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jbz74\" (UniqueName: \"kubernetes.io/projected/7739352c-5106-4aec-b99a-2eab9c577078-kube-api-access-jbz74\") on node \"crc\" DevicePath \"\""
Nov 21 16:30:04 crc kubenswrapper[4967]: I1121 16:30:04.825710 4967 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7739352c-5106-4aec-b99a-2eab9c577078-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 21 16:30:04 crc kubenswrapper[4967]: I1121 16:30:04.825723 4967 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7739352c-5106-4aec-b99a-2eab9c577078-config-volume\") on node \"crc\" DevicePath \"\""
Nov 21 16:30:05 crc kubenswrapper[4967]: I1121 16:30:05.230072 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8" event={"ID":"7739352c-5106-4aec-b99a-2eab9c577078","Type":"ContainerDied","Data":"ca8590b425fc46158fbf98e0e66874b395d3be0851dbbd5be98e2de12af6590e"}
Nov 21 16:30:05 crc kubenswrapper[4967]: I1121 16:30:05.230119 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca8590b425fc46158fbf98e0e66874b395d3be0851dbbd5be98e2de12af6590e"
Nov 21 16:30:05 crc kubenswrapper[4967]: I1121 16:30:05.230132 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8"
Nov 21 16:30:05 crc kubenswrapper[4967]: I1121 16:30:05.772247 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395665-rtvmd"]
Nov 21 16:30:05 crc kubenswrapper[4967]: I1121 16:30:05.786108 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395665-rtvmd"]
Nov 21 16:30:06 crc kubenswrapper[4967]: I1121 16:30:06.553893 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d59513f6-55e3-4bbb-9207-cdd4936124cd" path="/var/lib/kubelet/pods/d59513f6-55e3-4bbb-9207-cdd4936124cd/volumes"
Nov 21 16:30:07 crc kubenswrapper[4967]: I1121 16:30:07.537405 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"
Nov 21 16:30:07 crc kubenswrapper[4967]: E1121 16:30:07.538443 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 16:30:18 crc kubenswrapper[4967]: I1121 16:30:18.536737 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"
Nov 21 16:30:18 crc kubenswrapper[4967]: E1121 16:30:18.537588 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 16:30:23 crc kubenswrapper[4967]: I1121 16:30:23.964246 4967 scope.go:117] "RemoveContainer" containerID="9a3a36ddff348a89aee0880c30634d6cca08a15fa3fc20d5e9eaa5aa0e5f7e0e"
Nov 21 16:30:32 crc kubenswrapper[4967]: I1121 16:30:32.544728 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"
Nov 21 16:30:32 crc kubenswrapper[4967]: E1121 16:30:32.546387 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 16:30:43 crc kubenswrapper[4967]: I1121 16:30:43.536139 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"
Nov 21 16:30:43 crc kubenswrapper[4967]: E1121 16:30:43.537142 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 16:30:58 crc kubenswrapper[4967]: I1121 16:30:58.538028 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01"
Nov 21 16:30:58 crc kubenswrapper[4967]: I1121 16:30:58.798276 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"94e4d92854b385b411dc204ce4bcdc6c913b47f37afef75c6b871c83f6c7606f"}
Nov 21 16:31:25 crc kubenswrapper[4967]: I1121 16:31:25.091709 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-psfcp"]
Nov 21 16:31:25 crc kubenswrapper[4967]: E1121 16:31:25.092972 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7739352c-5106-4aec-b99a-2eab9c577078" containerName="collect-profiles"
Nov 21 16:31:25 crc kubenswrapper[4967]: I1121 16:31:25.092992 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="7739352c-5106-4aec-b99a-2eab9c577078" containerName="collect-profiles"
Nov 21 16:31:25 crc kubenswrapper[4967]: I1121 16:31:25.093301 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="7739352c-5106-4aec-b99a-2eab9c577078" containerName="collect-profiles"
Nov 21 16:31:25 crc kubenswrapper[4967]: I1121 16:31:25.095529 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-psfcp"
Nov 21 16:31:25 crc kubenswrapper[4967]: I1121 16:31:25.106557 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-psfcp"]
Nov 21 16:31:25 crc kubenswrapper[4967]: I1121 16:31:25.230113 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2gctb\" (UniqueName: \"kubernetes.io/projected/b349f2ca-246e-4fe4-9e75-f678ca46b5c7-kube-api-access-2gctb\") pod \"certified-operators-psfcp\" (UID: \"b349f2ca-246e-4fe4-9e75-f678ca46b5c7\") " pod="openshift-marketplace/certified-operators-psfcp"
Nov 21 16:31:25 crc kubenswrapper[4967]: I1121 16:31:25.230480 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b349f2ca-246e-4fe4-9e75-f678ca46b5c7-catalog-content\") pod \"certified-operators-psfcp\" (UID: \"b349f2ca-246e-4fe4-9e75-f678ca46b5c7\") " pod="openshift-marketplace/certified-operators-psfcp"
Nov 21 16:31:25 crc kubenswrapper[4967]: I1121 16:31:25.230622 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b349f2ca-246e-4fe4-9e75-f678ca46b5c7-utilities\") pod \"certified-operators-psfcp\" (UID: \"b349f2ca-246e-4fe4-9e75-f678ca46b5c7\") " pod="openshift-marketplace/certified-operators-psfcp"
Nov 21 16:31:25 crc kubenswrapper[4967]: I1121 16:31:25.333148 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2gctb\" (UniqueName: \"kubernetes.io/projected/b349f2ca-246e-4fe4-9e75-f678ca46b5c7-kube-api-access-2gctb\") pod \"certified-operators-psfcp\" (UID: \"b349f2ca-246e-4fe4-9e75-f678ca46b5c7\") " pod="openshift-marketplace/certified-operators-psfcp"
Nov 21 16:31:25 crc kubenswrapper[4967]: I1121 16:31:25.333203 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b349f2ca-246e-4fe4-9e75-f678ca46b5c7-catalog-content\") pod \"certified-operators-psfcp\" (UID: \"b349f2ca-246e-4fe4-9e75-f678ca46b5c7\") " pod="openshift-marketplace/certified-operators-psfcp"
Nov 21 16:31:25 crc kubenswrapper[4967]: I1121 16:31:25.333283 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b349f2ca-246e-4fe4-9e75-f678ca46b5c7-utilities\") pod \"certified-operators-psfcp\" (UID: \"b349f2ca-246e-4fe4-9e75-f678ca46b5c7\") " pod="openshift-marketplace/certified-operators-psfcp"
Nov 21 16:31:25 crc kubenswrapper[4967]: I1121 16:31:25.333886 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b349f2ca-246e-4fe4-9e75-f678ca46b5c7-catalog-content\") pod \"certified-operators-psfcp\" (UID: \"b349f2ca-246e-4fe4-9e75-f678ca46b5c7\") " pod="openshift-marketplace/certified-operators-psfcp"
Nov 21 16:31:25 crc kubenswrapper[4967]: I1121 16:31:25.333965 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b349f2ca-246e-4fe4-9e75-f678ca46b5c7-utilities\") pod \"certified-operators-psfcp\" (UID: \"b349f2ca-246e-4fe4-9e75-f678ca46b5c7\") " pod="openshift-marketplace/certified-operators-psfcp"
Nov 21 16:31:25 crc kubenswrapper[4967]: I1121 16:31:25.367126 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2gctb\" (UniqueName: \"kubernetes.io/projected/b349f2ca-246e-4fe4-9e75-f678ca46b5c7-kube-api-access-2gctb\") pod \"certified-operators-psfcp\" (UID: \"b349f2ca-246e-4fe4-9e75-f678ca46b5c7\") " pod="openshift-marketplace/certified-operators-psfcp"
Nov 21 16:31:25 crc kubenswrapper[4967]: I1121 16:31:25.419036 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-psfcp"
Nov 21 16:31:25 crc kubenswrapper[4967]: I1121 16:31:25.963424 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-psfcp"]
Nov 21 16:31:26 crc kubenswrapper[4967]: I1121 16:31:26.098027 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-psfcp" event={"ID":"b349f2ca-246e-4fe4-9e75-f678ca46b5c7","Type":"ContainerStarted","Data":"e81e6f44f7d1e82b83c6a016b0ea31e5f1d58e00366f53a56008ca8c959c046b"}
Nov 21 16:31:27 crc kubenswrapper[4967]: I1121 16:31:27.109666 4967 generic.go:334] "Generic (PLEG): container finished" podID="b349f2ca-246e-4fe4-9e75-f678ca46b5c7" containerID="f825ccff80ccc52f5b8aa1eaed5d567e57e1e2ffe6c820a84b833909a4128b40" exitCode=0
Nov 21 16:31:27 crc kubenswrapper[4967]: I1121 16:31:27.109737 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-psfcp" event={"ID":"b349f2ca-246e-4fe4-9e75-f678ca46b5c7","Type":"ContainerDied","Data":"f825ccff80ccc52f5b8aa1eaed5d567e57e1e2ffe6c820a84b833909a4128b40"}
Nov 21 16:31:28 crc kubenswrapper[4967]: I1121 16:31:28.121580 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-psfcp" event={"ID":"b349f2ca-246e-4fe4-9e75-f678ca46b5c7","Type":"ContainerStarted","Data":"6f340083ae563b26da801f3b64be14f18bf4e2d8b33b10b7118323c9af39e246"}
Nov 21 16:31:30 crc kubenswrapper[4967]: I1121 16:31:30.144995 4967 generic.go:334] "Generic (PLEG): container finished" podID="b349f2ca-246e-4fe4-9e75-f678ca46b5c7" containerID="6f340083ae563b26da801f3b64be14f18bf4e2d8b33b10b7118323c9af39e246" exitCode=0
Nov 21 16:31:30 crc kubenswrapper[4967]: I1121 16:31:30.145040 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-psfcp" event={"ID":"b349f2ca-246e-4fe4-9e75-f678ca46b5c7","Type":"ContainerDied","Data":"6f340083ae563b26da801f3b64be14f18bf4e2d8b33b10b7118323c9af39e246"}
Nov 21 16:31:31 crc kubenswrapper[4967]: I1121 16:31:31.163045 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-psfcp" event={"ID":"b349f2ca-246e-4fe4-9e75-f678ca46b5c7","Type":"ContainerStarted","Data":"7da00c57158152260289e1930738d1a5a26a01b70b6616cfec8c63c8f30898f8"}
Nov 21 16:31:31 crc kubenswrapper[4967]: I1121 16:31:31.187801 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-psfcp" podStartSLOduration=2.783190977 podStartE2EDuration="6.187781293s" podCreationTimestamp="2025-11-21 16:31:25 +0000 UTC" firstStartedPulling="2025-11-21 16:31:27.111839217 +0000 UTC m=+3375.370360225" lastFinishedPulling="2025-11-21 16:31:30.516429533 +0000 UTC m=+3378.774950541" observedRunningTime="2025-11-21 16:31:31.182546354 +0000 UTC m=+3379.441067372" watchObservedRunningTime="2025-11-21 16:31:31.187781293 +0000 UTC m=+3379.446302301"
Nov 21 16:31:35 crc kubenswrapper[4967]: I1121 16:31:35.419371 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-psfcp"
Nov 21 16:31:35 crc kubenswrapper[4967]: I1121 16:31:35.420609 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-psfcp"
Nov 21 16:31:35 crc kubenswrapper[4967]: I1121 16:31:35.474410 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-psfcp"
Nov 21 16:31:36 crc kubenswrapper[4967]: I1121 16:31:36.266158 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-psfcp"
Nov 21 16:31:36 crc kubenswrapper[4967]: I1121 16:31:36.327134 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-psfcp"]
Nov 21 16:31:38 crc kubenswrapper[4967]: I1121 16:31:38.237173 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-psfcp" podUID="b349f2ca-246e-4fe4-9e75-f678ca46b5c7" containerName="registry-server" containerID="cri-o://7da00c57158152260289e1930738d1a5a26a01b70b6616cfec8c63c8f30898f8" gracePeriod=2
Nov 21 16:31:38 crc kubenswrapper[4967]: I1121 16:31:38.759005 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-psfcp"
Nov 21 16:31:38 crc kubenswrapper[4967]: I1121 16:31:38.881450 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2gctb\" (UniqueName: \"kubernetes.io/projected/b349f2ca-246e-4fe4-9e75-f678ca46b5c7-kube-api-access-2gctb\") pod \"b349f2ca-246e-4fe4-9e75-f678ca46b5c7\" (UID: \"b349f2ca-246e-4fe4-9e75-f678ca46b5c7\") "
Nov 21 16:31:38 crc kubenswrapper[4967]: I1121 16:31:38.881565 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b349f2ca-246e-4fe4-9e75-f678ca46b5c7-utilities\") pod \"b349f2ca-246e-4fe4-9e75-f678ca46b5c7\" (UID: \"b349f2ca-246e-4fe4-9e75-f678ca46b5c7\") "
Nov 21 16:31:38 crc kubenswrapper[4967]: I1121 16:31:38.881598 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b349f2ca-246e-4fe4-9e75-f678ca46b5c7-catalog-content\") pod \"b349f2ca-246e-4fe4-9e75-f678ca46b5c7\" (UID: \"b349f2ca-246e-4fe4-9e75-f678ca46b5c7\") "
Nov 21 16:31:38 crc kubenswrapper[4967]: I1121 16:31:38.882560 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b349f2ca-246e-4fe4-9e75-f678ca46b5c7-utilities" (OuterVolumeSpecName: "utilities") pod "b349f2ca-246e-4fe4-9e75-f678ca46b5c7" (UID: "b349f2ca-246e-4fe4-9e75-f678ca46b5c7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 16:31:38 crc kubenswrapper[4967]: I1121 16:31:38.889627 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b349f2ca-246e-4fe4-9e75-f678ca46b5c7-kube-api-access-2gctb" (OuterVolumeSpecName: "kube-api-access-2gctb") pod "b349f2ca-246e-4fe4-9e75-f678ca46b5c7" (UID: "b349f2ca-246e-4fe4-9e75-f678ca46b5c7"). InnerVolumeSpecName "kube-api-access-2gctb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 16:31:38 crc kubenswrapper[4967]: I1121 16:31:38.926708 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b349f2ca-246e-4fe4-9e75-f678ca46b5c7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b349f2ca-246e-4fe4-9e75-f678ca46b5c7" (UID: "b349f2ca-246e-4fe4-9e75-f678ca46b5c7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 16:31:38 crc kubenswrapper[4967]: I1121 16:31:38.985450 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2gctb\" (UniqueName: \"kubernetes.io/projected/b349f2ca-246e-4fe4-9e75-f678ca46b5c7-kube-api-access-2gctb\") on node \"crc\" DevicePath \"\""
Nov 21 16:31:38 crc kubenswrapper[4967]: I1121 16:31:38.985738 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b349f2ca-246e-4fe4-9e75-f678ca46b5c7-utilities\") on node \"crc\" DevicePath \"\""
Nov 21 16:31:38 crc kubenswrapper[4967]: I1121 16:31:38.985753 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b349f2ca-246e-4fe4-9e75-f678ca46b5c7-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 21 16:31:39 crc kubenswrapper[4967]: I1121 16:31:39.251543 4967 generic.go:334] "Generic (PLEG): container finished" podID="b349f2ca-246e-4fe4-9e75-f678ca46b5c7" containerID="7da00c57158152260289e1930738d1a5a26a01b70b6616cfec8c63c8f30898f8" exitCode=0
Nov 21 16:31:39 crc kubenswrapper[4967]: I1121 16:31:39.251616 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-psfcp"
Nov 21 16:31:39 crc kubenswrapper[4967]: I1121 16:31:39.251614 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-psfcp" event={"ID":"b349f2ca-246e-4fe4-9e75-f678ca46b5c7","Type":"ContainerDied","Data":"7da00c57158152260289e1930738d1a5a26a01b70b6616cfec8c63c8f30898f8"}
Nov 21 16:31:39 crc kubenswrapper[4967]: I1121 16:31:39.251704 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-psfcp" event={"ID":"b349f2ca-246e-4fe4-9e75-f678ca46b5c7","Type":"ContainerDied","Data":"e81e6f44f7d1e82b83c6a016b0ea31e5f1d58e00366f53a56008ca8c959c046b"}
Nov 21 16:31:39 crc kubenswrapper[4967]: I1121 16:31:39.251739 4967 scope.go:117] "RemoveContainer" containerID="7da00c57158152260289e1930738d1a5a26a01b70b6616cfec8c63c8f30898f8"
Nov 21 16:31:39 crc kubenswrapper[4967]: I1121 16:31:39.281554 4967 scope.go:117] "RemoveContainer" containerID="6f340083ae563b26da801f3b64be14f18bf4e2d8b33b10b7118323c9af39e246"
Nov 21 16:31:39 crc kubenswrapper[4967]: I1121 16:31:39.288381 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-psfcp"]
Nov 21 16:31:39 crc kubenswrapper[4967]: I1121 16:31:39.307159 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-psfcp"]
Nov 21 16:31:39 crc kubenswrapper[4967]: I1121 16:31:39.342221 4967 scope.go:117] "RemoveContainer" containerID="f825ccff80ccc52f5b8aa1eaed5d567e57e1e2ffe6c820a84b833909a4128b40"
Nov 21 16:31:39 crc kubenswrapper[4967]: I1121 16:31:39.379909 4967 scope.go:117] "RemoveContainer" containerID="7da00c57158152260289e1930738d1a5a26a01b70b6616cfec8c63c8f30898f8"
Nov 21 16:31:39 crc kubenswrapper[4967]: E1121 16:31:39.380368 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7da00c57158152260289e1930738d1a5a26a01b70b6616cfec8c63c8f30898f8\": container with ID starting with 7da00c57158152260289e1930738d1a5a26a01b70b6616cfec8c63c8f30898f8 not found: ID does not exist" containerID="7da00c57158152260289e1930738d1a5a26a01b70b6616cfec8c63c8f30898f8"
Nov 21 16:31:39 crc kubenswrapper[4967]: I1121 16:31:39.380402 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7da00c57158152260289e1930738d1a5a26a01b70b6616cfec8c63c8f30898f8"} err="failed to get container status \"7da00c57158152260289e1930738d1a5a26a01b70b6616cfec8c63c8f30898f8\": rpc error: code = NotFound desc = could not find container \"7da00c57158152260289e1930738d1a5a26a01b70b6616cfec8c63c8f30898f8\": container with ID starting with 7da00c57158152260289e1930738d1a5a26a01b70b6616cfec8c63c8f30898f8 not found: ID does not exist"
Nov 21 16:31:39 crc kubenswrapper[4967]: I1121 16:31:39.380424 4967 scope.go:117] "RemoveContainer" containerID="6f340083ae563b26da801f3b64be14f18bf4e2d8b33b10b7118323c9af39e246"
Nov 21 16:31:39 crc kubenswrapper[4967]: E1121 16:31:39.380658 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f340083ae563b26da801f3b64be14f18bf4e2d8b33b10b7118323c9af39e246\": container with ID starting with 6f340083ae563b26da801f3b64be14f18bf4e2d8b33b10b7118323c9af39e246 not found: ID does not exist" containerID="6f340083ae563b26da801f3b64be14f18bf4e2d8b33b10b7118323c9af39e246"
Nov 21 16:31:39 crc kubenswrapper[4967]: I1121 16:31:39.380697 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f340083ae563b26da801f3b64be14f18bf4e2d8b33b10b7118323c9af39e246"} err="failed to get container status \"6f340083ae563b26da801f3b64be14f18bf4e2d8b33b10b7118323c9af39e246\": rpc error: code = NotFound desc = could not find container \"6f340083ae563b26da801f3b64be14f18bf4e2d8b33b10b7118323c9af39e246\": container with ID starting with 6f340083ae563b26da801f3b64be14f18bf4e2d8b33b10b7118323c9af39e246 not found: ID does not exist"
Nov 21 16:31:39 crc kubenswrapper[4967]: I1121 16:31:39.380710 4967 scope.go:117] "RemoveContainer" containerID="f825ccff80ccc52f5b8aa1eaed5d567e57e1e2ffe6c820a84b833909a4128b40"
Nov 21 16:31:39 crc kubenswrapper[4967]: E1121 16:31:39.380920 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f825ccff80ccc52f5b8aa1eaed5d567e57e1e2ffe6c820a84b833909a4128b40\": container with ID starting with f825ccff80ccc52f5b8aa1eaed5d567e57e1e2ffe6c820a84b833909a4128b40 not found: ID does not exist" containerID="f825ccff80ccc52f5b8aa1eaed5d567e57e1e2ffe6c820a84b833909a4128b40"
Nov 21 16:31:39 crc kubenswrapper[4967]: I1121 16:31:39.380940 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f825ccff80ccc52f5b8aa1eaed5d567e57e1e2ffe6c820a84b833909a4128b40"} err="failed to get container status \"f825ccff80ccc52f5b8aa1eaed5d567e57e1e2ffe6c820a84b833909a4128b40\": rpc error: code = NotFound desc = could not find container \"f825ccff80ccc52f5b8aa1eaed5d567e57e1e2ffe6c820a84b833909a4128b40\": container with ID starting with f825ccff80ccc52f5b8aa1eaed5d567e57e1e2ffe6c820a84b833909a4128b40 not found: ID does not exist"
Nov 21 16:31:40 crc kubenswrapper[4967]: I1121 16:31:40.552078 4967 kubelet_volumes.go:163] "Cleaned
up orphaned pod volumes dir" podUID="b349f2ca-246e-4fe4-9e75-f678ca46b5c7" path="/var/lib/kubelet/pods/b349f2ca-246e-4fe4-9e75-f678ca46b5c7/volumes" Nov 21 16:31:43 crc kubenswrapper[4967]: I1121 16:31:43.322906 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5sv4k"] Nov 21 16:31:43 crc kubenswrapper[4967]: E1121 16:31:43.323964 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b349f2ca-246e-4fe4-9e75-f678ca46b5c7" containerName="extract-utilities" Nov 21 16:31:43 crc kubenswrapper[4967]: I1121 16:31:43.323982 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b349f2ca-246e-4fe4-9e75-f678ca46b5c7" containerName="extract-utilities" Nov 21 16:31:43 crc kubenswrapper[4967]: E1121 16:31:43.324042 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b349f2ca-246e-4fe4-9e75-f678ca46b5c7" containerName="extract-content" Nov 21 16:31:43 crc kubenswrapper[4967]: I1121 16:31:43.324048 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b349f2ca-246e-4fe4-9e75-f678ca46b5c7" containerName="extract-content" Nov 21 16:31:43 crc kubenswrapper[4967]: E1121 16:31:43.324057 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b349f2ca-246e-4fe4-9e75-f678ca46b5c7" containerName="registry-server" Nov 21 16:31:43 crc kubenswrapper[4967]: I1121 16:31:43.324064 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b349f2ca-246e-4fe4-9e75-f678ca46b5c7" containerName="registry-server" Nov 21 16:31:43 crc kubenswrapper[4967]: I1121 16:31:43.324463 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="b349f2ca-246e-4fe4-9e75-f678ca46b5c7" containerName="registry-server" Nov 21 16:31:43 crc kubenswrapper[4967]: I1121 16:31:43.326340 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5sv4k" Nov 21 16:31:43 crc kubenswrapper[4967]: I1121 16:31:43.339202 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5sv4k"] Nov 21 16:31:43 crc kubenswrapper[4967]: I1121 16:31:43.398932 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5a00160-e2fe-4a03-aaed-59b22c6fd700-catalog-content\") pod \"community-operators-5sv4k\" (UID: \"a5a00160-e2fe-4a03-aaed-59b22c6fd700\") " pod="openshift-marketplace/community-operators-5sv4k" Nov 21 16:31:43 crc kubenswrapper[4967]: I1121 16:31:43.399117 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6pkb\" (UniqueName: \"kubernetes.io/projected/a5a00160-e2fe-4a03-aaed-59b22c6fd700-kube-api-access-n6pkb\") pod \"community-operators-5sv4k\" (UID: \"a5a00160-e2fe-4a03-aaed-59b22c6fd700\") " pod="openshift-marketplace/community-operators-5sv4k" Nov 21 16:31:43 crc kubenswrapper[4967]: I1121 16:31:43.399332 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5a00160-e2fe-4a03-aaed-59b22c6fd700-utilities\") pod \"community-operators-5sv4k\" (UID: \"a5a00160-e2fe-4a03-aaed-59b22c6fd700\") " pod="openshift-marketplace/community-operators-5sv4k" Nov 21 16:31:43 crc kubenswrapper[4967]: I1121 16:31:43.501558 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n6pkb\" (UniqueName: \"kubernetes.io/projected/a5a00160-e2fe-4a03-aaed-59b22c6fd700-kube-api-access-n6pkb\") pod \"community-operators-5sv4k\" (UID: \"a5a00160-e2fe-4a03-aaed-59b22c6fd700\") " pod="openshift-marketplace/community-operators-5sv4k" Nov 21 16:31:43 crc kubenswrapper[4967]: I1121 16:31:43.501993 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5a00160-e2fe-4a03-aaed-59b22c6fd700-utilities\") pod \"community-operators-5sv4k\" (UID: \"a5a00160-e2fe-4a03-aaed-59b22c6fd700\") " pod="openshift-marketplace/community-operators-5sv4k" Nov 21 16:31:43 crc kubenswrapper[4967]: I1121 16:31:43.502166 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5a00160-e2fe-4a03-aaed-59b22c6fd700-catalog-content\") pod \"community-operators-5sv4k\" (UID: \"a5a00160-e2fe-4a03-aaed-59b22c6fd700\") " pod="openshift-marketplace/community-operators-5sv4k" Nov 21 16:31:43 crc kubenswrapper[4967]: I1121 16:31:43.502446 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5a00160-e2fe-4a03-aaed-59b22c6fd700-utilities\") pod \"community-operators-5sv4k\" (UID: \"a5a00160-e2fe-4a03-aaed-59b22c6fd700\") " pod="openshift-marketplace/community-operators-5sv4k" Nov 21 16:31:43 crc kubenswrapper[4967]: I1121 16:31:43.502729 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5a00160-e2fe-4a03-aaed-59b22c6fd700-catalog-content\") pod \"community-operators-5sv4k\" (UID: \"a5a00160-e2fe-4a03-aaed-59b22c6fd700\") " pod="openshift-marketplace/community-operators-5sv4k" Nov 21 16:31:43 crc kubenswrapper[4967]: I1121 16:31:43.522143 4967 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-n6pkb\" (UniqueName: \"kubernetes.io/projected/a5a00160-e2fe-4a03-aaed-59b22c6fd700-kube-api-access-n6pkb\") pod \"community-operators-5sv4k\" (UID: \"a5a00160-e2fe-4a03-aaed-59b22c6fd700\") " pod="openshift-marketplace/community-operators-5sv4k" Nov 21 16:31:43 crc kubenswrapper[4967]: I1121 16:31:43.664307 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5sv4k" Nov 21 16:31:44 crc kubenswrapper[4967]: I1121 16:31:44.231846 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5sv4k"] Nov 21 16:31:44 crc kubenswrapper[4967]: I1121 16:31:44.315728 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5sv4k" event={"ID":"a5a00160-e2fe-4a03-aaed-59b22c6fd700","Type":"ContainerStarted","Data":"f417ffdef6c2b2217d1cd6b7ed2f79c92e9c983c8c85f43dd9d3a289de0e2e24"} Nov 21 16:31:45 crc kubenswrapper[4967]: I1121 16:31:45.329715 4967 generic.go:334] "Generic (PLEG): container finished" podID="a5a00160-e2fe-4a03-aaed-59b22c6fd700" containerID="20212241053deaa45184f8c585d938c71241c65474b78457e32a7fe58d67eb3f" exitCode=0 Nov 21 16:31:45 crc kubenswrapper[4967]: I1121 16:31:45.329792 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5sv4k" event={"ID":"a5a00160-e2fe-4a03-aaed-59b22c6fd700","Type":"ContainerDied","Data":"20212241053deaa45184f8c585d938c71241c65474b78457e32a7fe58d67eb3f"} Nov 21 16:31:46 crc kubenswrapper[4967]: I1121 16:31:46.346256 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5sv4k" event={"ID":"a5a00160-e2fe-4a03-aaed-59b22c6fd700","Type":"ContainerStarted","Data":"00de5a7fbe8d88f068a24515620821d406a9d6daf9c2c1caa78ed0442e980e46"} Nov 21 16:31:48 crc kubenswrapper[4967]: I1121 16:31:48.365608 4967 generic.go:334] "Generic (PLEG): container finished" podID="a5a00160-e2fe-4a03-aaed-59b22c6fd700" containerID="00de5a7fbe8d88f068a24515620821d406a9d6daf9c2c1caa78ed0442e980e46" exitCode=0 Nov 21 16:31:48 crc kubenswrapper[4967]: I1121 16:31:48.365690 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5sv4k" event={"ID":"a5a00160-e2fe-4a03-aaed-59b22c6fd700","Type":"ContainerDied","Data":"00de5a7fbe8d88f068a24515620821d406a9d6daf9c2c1caa78ed0442e980e46"} Nov 21 16:31:49 crc kubenswrapper[4967]: I1121 16:31:49.380233 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5sv4k" event={"ID":"a5a00160-e2fe-4a03-aaed-59b22c6fd700","Type":"ContainerStarted","Data":"2d754d28c2e18afbf2d22adf04e9f316bd600cebd45fdfbe53d32c0a16e5b174"} Nov 21 16:31:49 crc kubenswrapper[4967]: I1121 16:31:49.410855 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5sv4k" podStartSLOduration=2.962005605 podStartE2EDuration="6.410825012s" podCreationTimestamp="2025-11-21 16:31:43 +0000 UTC" firstStartedPulling="2025-11-21 16:31:45.332256351 +0000 UTC m=+3393.590777359" lastFinishedPulling="2025-11-21 16:31:48.781075758 +0000 UTC m=+3397.039596766" observedRunningTime="2025-11-21 16:31:49.400664972 +0000 UTC m=+3397.659185990" watchObservedRunningTime="2025-11-21 16:31:49.410825012 +0000 UTC m=+3397.669346020" Nov 21 16:31:53 crc kubenswrapper[4967]: I1121 16:31:53.664744 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/community-operators-5sv4k" Nov 21 16:31:53 crc kubenswrapper[4967]: I1121 16:31:53.665425 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5sv4k" Nov 21 16:31:53 crc kubenswrapper[4967]: I1121 16:31:53.715045 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5sv4k" Nov 21 16:31:54 crc kubenswrapper[4967]: I1121 16:31:54.492051 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5sv4k" Nov 21 16:31:58 crc kubenswrapper[4967]: I1121 16:31:58.512192 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5sv4k"] Nov 21 16:31:58 crc kubenswrapper[4967]: I1121 16:31:58.513130 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5sv4k" podUID="a5a00160-e2fe-4a03-aaed-59b22c6fd700" containerName="registry-server" containerID="cri-o://2d754d28c2e18afbf2d22adf04e9f316bd600cebd45fdfbe53d32c0a16e5b174" gracePeriod=2 Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.026418 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5sv4k" Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.095760 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5a00160-e2fe-4a03-aaed-59b22c6fd700-catalog-content\") pod \"a5a00160-e2fe-4a03-aaed-59b22c6fd700\" (UID: \"a5a00160-e2fe-4a03-aaed-59b22c6fd700\") " Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.095854 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5a00160-e2fe-4a03-aaed-59b22c6fd700-utilities\") pod \"a5a00160-e2fe-4a03-aaed-59b22c6fd700\" (UID: \"a5a00160-e2fe-4a03-aaed-59b22c6fd700\") " Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.095956 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n6pkb\" (UniqueName: \"kubernetes.io/projected/a5a00160-e2fe-4a03-aaed-59b22c6fd700-kube-api-access-n6pkb\") pod \"a5a00160-e2fe-4a03-aaed-59b22c6fd700\" (UID: \"a5a00160-e2fe-4a03-aaed-59b22c6fd700\") " Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.096987 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5a00160-e2fe-4a03-aaed-59b22c6fd700-utilities" (OuterVolumeSpecName: "utilities") pod "a5a00160-e2fe-4a03-aaed-59b22c6fd700" (UID: "a5a00160-e2fe-4a03-aaed-59b22c6fd700"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.101866 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5a00160-e2fe-4a03-aaed-59b22c6fd700-kube-api-access-n6pkb" (OuterVolumeSpecName: "kube-api-access-n6pkb") pod "a5a00160-e2fe-4a03-aaed-59b22c6fd700" (UID: "a5a00160-e2fe-4a03-aaed-59b22c6fd700"). InnerVolumeSpecName "kube-api-access-n6pkb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.144866 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5a00160-e2fe-4a03-aaed-59b22c6fd700-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a5a00160-e2fe-4a03-aaed-59b22c6fd700" (UID: "a5a00160-e2fe-4a03-aaed-59b22c6fd700"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.199192 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n6pkb\" (UniqueName: \"kubernetes.io/projected/a5a00160-e2fe-4a03-aaed-59b22c6fd700-kube-api-access-n6pkb\") on node \"crc\" DevicePath \"\"" Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.199236 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5a00160-e2fe-4a03-aaed-59b22c6fd700-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.199247 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5a00160-e2fe-4a03-aaed-59b22c6fd700-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.500789 4967 generic.go:334] "Generic (PLEG): container finished" podID="a5a00160-e2fe-4a03-aaed-59b22c6fd700" containerID="2d754d28c2e18afbf2d22adf04e9f316bd600cebd45fdfbe53d32c0a16e5b174" exitCode=0 Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.500874 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5sv4k" Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.500854 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5sv4k" event={"ID":"a5a00160-e2fe-4a03-aaed-59b22c6fd700","Type":"ContainerDied","Data":"2d754d28c2e18afbf2d22adf04e9f316bd600cebd45fdfbe53d32c0a16e5b174"} Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.501244 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5sv4k" event={"ID":"a5a00160-e2fe-4a03-aaed-59b22c6fd700","Type":"ContainerDied","Data":"f417ffdef6c2b2217d1cd6b7ed2f79c92e9c983c8c85f43dd9d3a289de0e2e24"} Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.501271 4967 scope.go:117] "RemoveContainer" containerID="2d754d28c2e18afbf2d22adf04e9f316bd600cebd45fdfbe53d32c0a16e5b174" Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.529878 4967 scope.go:117] "RemoveContainer" containerID="00de5a7fbe8d88f068a24515620821d406a9d6daf9c2c1caa78ed0442e980e46" Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.538177 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5sv4k"] Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.551582 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5sv4k"] Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.558591 4967 scope.go:117] "RemoveContainer" containerID="20212241053deaa45184f8c585d938c71241c65474b78457e32a7fe58d67eb3f" Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.608703 4967 scope.go:117] "RemoveContainer" containerID="2d754d28c2e18afbf2d22adf04e9f316bd600cebd45fdfbe53d32c0a16e5b174" Nov 21 16:31:59 crc kubenswrapper[4967]: E1121 16:31:59.609287 4967 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d754d28c2e18afbf2d22adf04e9f316bd600cebd45fdfbe53d32c0a16e5b174\": container with ID starting with 2d754d28c2e18afbf2d22adf04e9f316bd600cebd45fdfbe53d32c0a16e5b174 not found: ID does not exist" containerID="2d754d28c2e18afbf2d22adf04e9f316bd600cebd45fdfbe53d32c0a16e5b174" Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.609355 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d754d28c2e18afbf2d22adf04e9f316bd600cebd45fdfbe53d32c0a16e5b174"} err="failed to get container status \"2d754d28c2e18afbf2d22adf04e9f316bd600cebd45fdfbe53d32c0a16e5b174\": rpc error: code = NotFound desc = could not find container \"2d754d28c2e18afbf2d22adf04e9f316bd600cebd45fdfbe53d32c0a16e5b174\": container with ID starting with 2d754d28c2e18afbf2d22adf04e9f316bd600cebd45fdfbe53d32c0a16e5b174 not found: ID does not exist" Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.609396 4967 scope.go:117] "RemoveContainer" containerID="00de5a7fbe8d88f068a24515620821d406a9d6daf9c2c1caa78ed0442e980e46" Nov 21 16:31:59 crc kubenswrapper[4967]: E1121 16:31:59.610006 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00de5a7fbe8d88f068a24515620821d406a9d6daf9c2c1caa78ed0442e980e46\": container with ID starting with 00de5a7fbe8d88f068a24515620821d406a9d6daf9c2c1caa78ed0442e980e46 not found: ID does not exist" containerID="00de5a7fbe8d88f068a24515620821d406a9d6daf9c2c1caa78ed0442e980e46" Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.610035 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00de5a7fbe8d88f068a24515620821d406a9d6daf9c2c1caa78ed0442e980e46"} err="failed to get container status \"00de5a7fbe8d88f068a24515620821d406a9d6daf9c2c1caa78ed0442e980e46\": rpc error: code = NotFound desc = could not find container \"00de5a7fbe8d88f068a24515620821d406a9d6daf9c2c1caa78ed0442e980e46\": container with ID starting with 00de5a7fbe8d88f068a24515620821d406a9d6daf9c2c1caa78ed0442e980e46 not found: ID does not exist" Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.610058 4967 scope.go:117] "RemoveContainer" containerID="20212241053deaa45184f8c585d938c71241c65474b78457e32a7fe58d67eb3f" Nov 21 16:31:59 crc kubenswrapper[4967]: E1121 16:31:59.610459 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20212241053deaa45184f8c585d938c71241c65474b78457e32a7fe58d67eb3f\": container with ID starting with 20212241053deaa45184f8c585d938c71241c65474b78457e32a7fe58d67eb3f not found: ID does not exist" containerID="20212241053deaa45184f8c585d938c71241c65474b78457e32a7fe58d67eb3f" Nov 21 16:31:59 crc kubenswrapper[4967]: I1121 16:31:59.610499 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20212241053deaa45184f8c585d938c71241c65474b78457e32a7fe58d67eb3f"} err="failed to get container status \"20212241053deaa45184f8c585d938c71241c65474b78457e32a7fe58d67eb3f\": rpc error: code = NotFound desc = could not find container \"20212241053deaa45184f8c585d938c71241c65474b78457e32a7fe58d67eb3f\": container with ID starting with 20212241053deaa45184f8c585d938c71241c65474b78457e32a7fe58d67eb3f not found: ID does not exist" Nov 21 16:32:00 crc kubenswrapper[4967]: I1121 16:32:00.549921 4967 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="a5a00160-e2fe-4a03-aaed-59b22c6fd700" path="/var/lib/kubelet/pods/a5a00160-e2fe-4a03-aaed-59b22c6fd700/volumes" Nov 21 16:33:16 crc kubenswrapper[4967]: I1121 16:33:16.522201 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:33:16 crc kubenswrapper[4967]: I1121 16:33:16.522769 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:33:46 crc kubenswrapper[4967]: I1121 16:33:46.522214 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:33:46 crc kubenswrapper[4967]: I1121 16:33:46.522848 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:34:16 crc kubenswrapper[4967]: I1121 16:34:16.521759 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:34:16 crc kubenswrapper[4967]: I1121 16:34:16.522364 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:34:16 crc kubenswrapper[4967]: I1121 16:34:16.522453 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 16:34:16 crc kubenswrapper[4967]: I1121 16:34:16.523346 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"94e4d92854b385b411dc204ce4bcdc6c913b47f37afef75c6b871c83f6c7606f"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 16:34:16 crc kubenswrapper[4967]: I1121 16:34:16.523404 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://94e4d92854b385b411dc204ce4bcdc6c913b47f37afef75c6b871c83f6c7606f" gracePeriod=600 Nov 21 16:34:16 crc kubenswrapper[4967]: I1121 16:34:16.987343 4967 generic.go:334] "Generic (PLEG): container finished" 
podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="94e4d92854b385b411dc204ce4bcdc6c913b47f37afef75c6b871c83f6c7606f" exitCode=0 Nov 21 16:34:16 crc kubenswrapper[4967]: I1121 16:34:16.987431 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"94e4d92854b385b411dc204ce4bcdc6c913b47f37afef75c6b871c83f6c7606f"} Nov 21 16:34:16 crc kubenswrapper[4967]: I1121 16:34:16.987683 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903"} Nov 21 16:34:16 crc kubenswrapper[4967]: I1121 16:34:16.987707 4967 scope.go:117] "RemoveContainer" containerID="720a5c2d425ef26ba71d3c1a93ccae08065d6a6d353c31d77c49260193825f01" Nov 21 16:36:16 crc kubenswrapper[4967]: I1121 16:36:16.523195 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:36:16 crc kubenswrapper[4967]: I1121 16:36:16.523773 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:36:46 crc kubenswrapper[4967]: I1121 16:36:46.522787 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:36:46 crc kubenswrapper[4967]: I1121 16:36:46.523477 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:37:16 crc kubenswrapper[4967]: I1121 16:37:16.522261 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:37:16 crc kubenswrapper[4967]: I1121 16:37:16.522834 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:37:16 crc kubenswrapper[4967]: I1121 16:37:16.522882 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 16:37:16 crc kubenswrapper[4967]: I1121 16:37:16.523894 4967 kuberuntime_manager.go:1027] "Message for 
Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 16:37:16 crc kubenswrapper[4967]: I1121 16:37:16.523956 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" gracePeriod=600 Nov 21 16:37:16 crc kubenswrapper[4967]: E1121 16:37:16.645527 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:37:17 crc kubenswrapper[4967]: I1121 16:37:17.082264 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" exitCode=0 Nov 21 16:37:17 crc kubenswrapper[4967]: I1121 16:37:17.082309 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903"} Nov 21 16:37:17 crc kubenswrapper[4967]: I1121 16:37:17.082355 4967 scope.go:117] "RemoveContainer" containerID="94e4d92854b385b411dc204ce4bcdc6c913b47f37afef75c6b871c83f6c7606f" Nov 21 16:37:17 crc kubenswrapper[4967]: I1121 16:37:17.082745 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:37:17 crc kubenswrapper[4967]: E1121 16:37:17.083051 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:37:30 crc kubenswrapper[4967]: I1121 16:37:30.536535 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:37:30 crc kubenswrapper[4967]: E1121 16:37:30.537753 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:37:34 crc kubenswrapper[4967]: I1121 16:37:34.801184 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jcgpw"] Nov 21 16:37:34 crc kubenswrapper[4967]: E1121 16:37:34.802495 
4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5a00160-e2fe-4a03-aaed-59b22c6fd700" containerName="extract-content" Nov 21 16:37:34 crc kubenswrapper[4967]: I1121 16:37:34.802514 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5a00160-e2fe-4a03-aaed-59b22c6fd700" containerName="extract-content" Nov 21 16:37:34 crc kubenswrapper[4967]: E1121 16:37:34.802538 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5a00160-e2fe-4a03-aaed-59b22c6fd700" containerName="registry-server" Nov 21 16:37:34 crc kubenswrapper[4967]: I1121 16:37:34.802549 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5a00160-e2fe-4a03-aaed-59b22c6fd700" containerName="registry-server" Nov 21 16:37:34 crc kubenswrapper[4967]: E1121 16:37:34.802596 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5a00160-e2fe-4a03-aaed-59b22c6fd700" containerName="extract-utilities" Nov 21 16:37:34 crc kubenswrapper[4967]: I1121 16:37:34.802604 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5a00160-e2fe-4a03-aaed-59b22c6fd700" containerName="extract-utilities" Nov 21 16:37:34 crc kubenswrapper[4967]: I1121 16:37:34.802906 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5a00160-e2fe-4a03-aaed-59b22c6fd700" containerName="registry-server" Nov 21 16:37:34 crc kubenswrapper[4967]: I1121 16:37:34.805113 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jcgpw" Nov 21 16:37:34 crc kubenswrapper[4967]: I1121 16:37:34.832021 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jcgpw"] Nov 21 16:37:34 crc kubenswrapper[4967]: I1121 16:37:34.886870 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/705ba794-4a20-4ebe-bc4b-e9f448b48fab-utilities\") pod \"redhat-operators-jcgpw\" (UID: \"705ba794-4a20-4ebe-bc4b-e9f448b48fab\") " pod="openshift-marketplace/redhat-operators-jcgpw" Nov 21 16:37:34 crc kubenswrapper[4967]: I1121 16:37:34.887153 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4twpb\" (UniqueName: \"kubernetes.io/projected/705ba794-4a20-4ebe-bc4b-e9f448b48fab-kube-api-access-4twpb\") pod \"redhat-operators-jcgpw\" (UID: \"705ba794-4a20-4ebe-bc4b-e9f448b48fab\") " pod="openshift-marketplace/redhat-operators-jcgpw" Nov 21 16:37:34 crc kubenswrapper[4967]: I1121 16:37:34.887294 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/705ba794-4a20-4ebe-bc4b-e9f448b48fab-catalog-content\") pod \"redhat-operators-jcgpw\" (UID: \"705ba794-4a20-4ebe-bc4b-e9f448b48fab\") " pod="openshift-marketplace/redhat-operators-jcgpw" Nov 21 16:37:34 crc kubenswrapper[4967]: I1121 16:37:34.989340 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/705ba794-4a20-4ebe-bc4b-e9f448b48fab-utilities\") pod \"redhat-operators-jcgpw\" (UID: \"705ba794-4a20-4ebe-bc4b-e9f448b48fab\") " pod="openshift-marketplace/redhat-operators-jcgpw" Nov 21 16:37:34 crc kubenswrapper[4967]: I1121 16:37:34.989399 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4twpb\" (UniqueName: 
\"kubernetes.io/projected/705ba794-4a20-4ebe-bc4b-e9f448b48fab-kube-api-access-4twpb\") pod \"redhat-operators-jcgpw\" (UID: \"705ba794-4a20-4ebe-bc4b-e9f448b48fab\") " pod="openshift-marketplace/redhat-operators-jcgpw" Nov 21 16:37:34 crc kubenswrapper[4967]: I1121 16:37:34.989448 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/705ba794-4a20-4ebe-bc4b-e9f448b48fab-catalog-content\") pod \"redhat-operators-jcgpw\" (UID: \"705ba794-4a20-4ebe-bc4b-e9f448b48fab\") " pod="openshift-marketplace/redhat-operators-jcgpw" Nov 21 16:37:34 crc kubenswrapper[4967]: I1121 16:37:34.989932 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/705ba794-4a20-4ebe-bc4b-e9f448b48fab-utilities\") pod \"redhat-operators-jcgpw\" (UID: \"705ba794-4a20-4ebe-bc4b-e9f448b48fab\") " pod="openshift-marketplace/redhat-operators-jcgpw" Nov 21 16:37:34 crc kubenswrapper[4967]: I1121 16:37:34.989932 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/705ba794-4a20-4ebe-bc4b-e9f448b48fab-catalog-content\") pod \"redhat-operators-jcgpw\" (UID: \"705ba794-4a20-4ebe-bc4b-e9f448b48fab\") " pod="openshift-marketplace/redhat-operators-jcgpw" Nov 21 16:37:35 crc kubenswrapper[4967]: I1121 16:37:35.013679 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4twpb\" (UniqueName: \"kubernetes.io/projected/705ba794-4a20-4ebe-bc4b-e9f448b48fab-kube-api-access-4twpb\") pod \"redhat-operators-jcgpw\" (UID: \"705ba794-4a20-4ebe-bc4b-e9f448b48fab\") " pod="openshift-marketplace/redhat-operators-jcgpw" Nov 21 16:37:35 crc kubenswrapper[4967]: I1121 16:37:35.128560 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jcgpw" Nov 21 16:37:35 crc kubenswrapper[4967]: I1121 16:37:35.618558 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jcgpw"] Nov 21 16:37:36 crc kubenswrapper[4967]: I1121 16:37:36.314330 4967 generic.go:334] "Generic (PLEG): container finished" podID="705ba794-4a20-4ebe-bc4b-e9f448b48fab" containerID="b913abb39438396df09be700a7bbb6cec1ed999bdb250da3a9294c278fbe1a13" exitCode=0 Nov 21 16:37:36 crc kubenswrapper[4967]: I1121 16:37:36.314622 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcgpw" event={"ID":"705ba794-4a20-4ebe-bc4b-e9f448b48fab","Type":"ContainerDied","Data":"b913abb39438396df09be700a7bbb6cec1ed999bdb250da3a9294c278fbe1a13"} Nov 21 16:37:36 crc kubenswrapper[4967]: I1121 16:37:36.314765 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcgpw" event={"ID":"705ba794-4a20-4ebe-bc4b-e9f448b48fab","Type":"ContainerStarted","Data":"1eeaffbea17fb85627630b9a2d0e8abc57f469bfb5218b27cfdca8b9dac5b0f9"} Nov 21 16:37:36 crc kubenswrapper[4967]: I1121 16:37:36.317558 4967 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 16:37:38 crc kubenswrapper[4967]: I1121 16:37:38.356115 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcgpw" event={"ID":"705ba794-4a20-4ebe-bc4b-e9f448b48fab","Type":"ContainerStarted","Data":"fb63697b4b9941ea9c396d045c50a9d3c02b6bbc1890b8c5117a73e727f9ef68"} Nov 21 16:37:41 crc kubenswrapper[4967]: I1121 16:37:41.394974 4967 generic.go:334] "Generic (PLEG): container finished" podID="705ba794-4a20-4ebe-bc4b-e9f448b48fab" containerID="fb63697b4b9941ea9c396d045c50a9d3c02b6bbc1890b8c5117a73e727f9ef68" exitCode=0 Nov 21 16:37:41 crc kubenswrapper[4967]: I1121 16:37:41.395503 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcgpw" event={"ID":"705ba794-4a20-4ebe-bc4b-e9f448b48fab","Type":"ContainerDied","Data":"fb63697b4b9941ea9c396d045c50a9d3c02b6bbc1890b8c5117a73e727f9ef68"} Nov 21 16:37:42 crc kubenswrapper[4967]: I1121 16:37:42.408642 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcgpw" event={"ID":"705ba794-4a20-4ebe-bc4b-e9f448b48fab","Type":"ContainerStarted","Data":"32241b340fec892a40eb6a9bb9fe0105c0516517b7aff06305c6366a85e1fb12"} Nov 21 16:37:42 crc kubenswrapper[4967]: I1121 16:37:42.447932 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jcgpw" podStartSLOduration=3.001170959 podStartE2EDuration="8.447910733s" podCreationTimestamp="2025-11-21 16:37:34 +0000 UTC" firstStartedPulling="2025-11-21 16:37:36.317235104 +0000 UTC m=+3744.575756122" lastFinishedPulling="2025-11-21 16:37:41.763974888 +0000 UTC m=+3750.022495896" observedRunningTime="2025-11-21 16:37:42.444591449 +0000 UTC m=+3750.703112457" watchObservedRunningTime="2025-11-21 16:37:42.447910733 +0000 UTC m=+3750.706431741" Nov 21 16:37:42 crc kubenswrapper[4967]: I1121 16:37:42.544295 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:37:42 crc kubenswrapper[4967]: E1121 16:37:42.544722 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:37:45 crc kubenswrapper[4967]: I1121 16:37:45.128957 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jcgpw" Nov 21 16:37:45 crc kubenswrapper[4967]: I1121 16:37:45.129613 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jcgpw" Nov 21 16:37:46 crc kubenswrapper[4967]: I1121 16:37:46.219508 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-jcgpw" podUID="705ba794-4a20-4ebe-bc4b-e9f448b48fab" containerName="registry-server" probeResult="failure" output=< Nov 21 16:37:46 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 16:37:46 crc kubenswrapper[4967]: > Nov 21 16:37:55 crc kubenswrapper[4967]: I1121 16:37:55.183433 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jcgpw" Nov 21 16:37:55 crc kubenswrapper[4967]: I1121 16:37:55.245112 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jcgpw" Nov 21 16:37:55 crc kubenswrapper[4967]: I1121 16:37:55.419756 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jcgpw"] Nov 21 16:37:56 crc kubenswrapper[4967]: I1121 16:37:56.537783 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:37:56 crc kubenswrapper[4967]: E1121 16:37:56.538123 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:37:56 crc kubenswrapper[4967]: I1121 16:37:56.567587 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jcgpw" podUID="705ba794-4a20-4ebe-bc4b-e9f448b48fab" containerName="registry-server" containerID="cri-o://32241b340fec892a40eb6a9bb9fe0105c0516517b7aff06305c6366a85e1fb12" gracePeriod=2 Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.433484 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jcgpw" Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.551610 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/705ba794-4a20-4ebe-bc4b-e9f448b48fab-catalog-content\") pod \"705ba794-4a20-4ebe-bc4b-e9f448b48fab\" (UID: \"705ba794-4a20-4ebe-bc4b-e9f448b48fab\") " Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.552005 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/705ba794-4a20-4ebe-bc4b-e9f448b48fab-utilities\") pod \"705ba794-4a20-4ebe-bc4b-e9f448b48fab\" (UID: \"705ba794-4a20-4ebe-bc4b-e9f448b48fab\") " Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.552186 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4twpb\" (UniqueName: \"kubernetes.io/projected/705ba794-4a20-4ebe-bc4b-e9f448b48fab-kube-api-access-4twpb\") pod \"705ba794-4a20-4ebe-bc4b-e9f448b48fab\" (UID: \"705ba794-4a20-4ebe-bc4b-e9f448b48fab\") " Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.552702 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/705ba794-4a20-4ebe-bc4b-e9f448b48fab-utilities" (OuterVolumeSpecName: "utilities") pod "705ba794-4a20-4ebe-bc4b-e9f448b48fab" (UID: "705ba794-4a20-4ebe-bc4b-e9f448b48fab"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.554844 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/705ba794-4a20-4ebe-bc4b-e9f448b48fab-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.578491 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/705ba794-4a20-4ebe-bc4b-e9f448b48fab-kube-api-access-4twpb" (OuterVolumeSpecName: "kube-api-access-4twpb") pod "705ba794-4a20-4ebe-bc4b-e9f448b48fab" (UID: "705ba794-4a20-4ebe-bc4b-e9f448b48fab"). InnerVolumeSpecName "kube-api-access-4twpb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.587160 4967 generic.go:334] "Generic (PLEG): container finished" podID="705ba794-4a20-4ebe-bc4b-e9f448b48fab" containerID="32241b340fec892a40eb6a9bb9fe0105c0516517b7aff06305c6366a85e1fb12" exitCode=0 Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.587227 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcgpw" event={"ID":"705ba794-4a20-4ebe-bc4b-e9f448b48fab","Type":"ContainerDied","Data":"32241b340fec892a40eb6a9bb9fe0105c0516517b7aff06305c6366a85e1fb12"} Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.587249 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jcgpw" Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.587272 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jcgpw" event={"ID":"705ba794-4a20-4ebe-bc4b-e9f448b48fab","Type":"ContainerDied","Data":"1eeaffbea17fb85627630b9a2d0e8abc57f469bfb5218b27cfdca8b9dac5b0f9"} Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.587303 4967 scope.go:117] "RemoveContainer" containerID="32241b340fec892a40eb6a9bb9fe0105c0516517b7aff06305c6366a85e1fb12" Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.657036 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4twpb\" (UniqueName: \"kubernetes.io/projected/705ba794-4a20-4ebe-bc4b-e9f448b48fab-kube-api-access-4twpb\") on node \"crc\" DevicePath \"\"" Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.678477 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/705ba794-4a20-4ebe-bc4b-e9f448b48fab-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "705ba794-4a20-4ebe-bc4b-e9f448b48fab" (UID: "705ba794-4a20-4ebe-bc4b-e9f448b48fab"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.702466 4967 scope.go:117] "RemoveContainer" containerID="fb63697b4b9941ea9c396d045c50a9d3c02b6bbc1890b8c5117a73e727f9ef68" Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.754921 4967 scope.go:117] "RemoveContainer" containerID="b913abb39438396df09be700a7bbb6cec1ed999bdb250da3a9294c278fbe1a13" Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.761920 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/705ba794-4a20-4ebe-bc4b-e9f448b48fab-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.789432 4967 scope.go:117] "RemoveContainer" containerID="32241b340fec892a40eb6a9bb9fe0105c0516517b7aff06305c6366a85e1fb12" Nov 21 16:37:57 crc kubenswrapper[4967]: E1121 16:37:57.792709 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32241b340fec892a40eb6a9bb9fe0105c0516517b7aff06305c6366a85e1fb12\": container with ID starting with 32241b340fec892a40eb6a9bb9fe0105c0516517b7aff06305c6366a85e1fb12 not found: ID does not exist" containerID="32241b340fec892a40eb6a9bb9fe0105c0516517b7aff06305c6366a85e1fb12" Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.792784 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32241b340fec892a40eb6a9bb9fe0105c0516517b7aff06305c6366a85e1fb12"} err="failed to get container status \"32241b340fec892a40eb6a9bb9fe0105c0516517b7aff06305c6366a85e1fb12\": rpc error: code = NotFound desc = could not find container \"32241b340fec892a40eb6a9bb9fe0105c0516517b7aff06305c6366a85e1fb12\": container with ID starting with 32241b340fec892a40eb6a9bb9fe0105c0516517b7aff06305c6366a85e1fb12 not found: ID does not exist" Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.792830 4967 scope.go:117] "RemoveContainer" containerID="fb63697b4b9941ea9c396d045c50a9d3c02b6bbc1890b8c5117a73e727f9ef68" Nov 21 16:37:57 crc kubenswrapper[4967]: E1121 16:37:57.793411 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"fb63697b4b9941ea9c396d045c50a9d3c02b6bbc1890b8c5117a73e727f9ef68\": container with ID starting with fb63697b4b9941ea9c396d045c50a9d3c02b6bbc1890b8c5117a73e727f9ef68 not found: ID does not exist" containerID="fb63697b4b9941ea9c396d045c50a9d3c02b6bbc1890b8c5117a73e727f9ef68" Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.793476 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb63697b4b9941ea9c396d045c50a9d3c02b6bbc1890b8c5117a73e727f9ef68"} err="failed to get container status \"fb63697b4b9941ea9c396d045c50a9d3c02b6bbc1890b8c5117a73e727f9ef68\": rpc error: code = NotFound desc = could not find container \"fb63697b4b9941ea9c396d045c50a9d3c02b6bbc1890b8c5117a73e727f9ef68\": container with ID starting with fb63697b4b9941ea9c396d045c50a9d3c02b6bbc1890b8c5117a73e727f9ef68 not found: ID does not exist" Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.793512 4967 scope.go:117] "RemoveContainer" containerID="b913abb39438396df09be700a7bbb6cec1ed999bdb250da3a9294c278fbe1a13" Nov 21 16:37:57 crc kubenswrapper[4967]: E1121 16:37:57.794587 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b913abb39438396df09be700a7bbb6cec1ed999bdb250da3a9294c278fbe1a13\": container with ID starting with b913abb39438396df09be700a7bbb6cec1ed999bdb250da3a9294c278fbe1a13 not found: ID does not exist" containerID="b913abb39438396df09be700a7bbb6cec1ed999bdb250da3a9294c278fbe1a13" Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.794653 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b913abb39438396df09be700a7bbb6cec1ed999bdb250da3a9294c278fbe1a13"} err="failed to get container status \"b913abb39438396df09be700a7bbb6cec1ed999bdb250da3a9294c278fbe1a13\": rpc error: code = NotFound desc = could not find container \"b913abb39438396df09be700a7bbb6cec1ed999bdb250da3a9294c278fbe1a13\": container with ID starting with b913abb39438396df09be700a7bbb6cec1ed999bdb250da3a9294c278fbe1a13 not found: ID does not exist" Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.933994 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jcgpw"] Nov 21 16:37:57 crc kubenswrapper[4967]: I1121 16:37:57.947929 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jcgpw"] Nov 21 16:37:58 crc kubenswrapper[4967]: I1121 16:37:58.549961 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="705ba794-4a20-4ebe-bc4b-e9f448b48fab" path="/var/lib/kubelet/pods/705ba794-4a20-4ebe-bc4b-e9f448b48fab/volumes" Nov 21 16:38:08 crc kubenswrapper[4967]: I1121 16:38:08.540955 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:38:08 crc kubenswrapper[4967]: E1121 16:38:08.542609 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:38:14 crc kubenswrapper[4967]: I1121 16:38:14.442406 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-r55t5"] Nov 21 16:38:14 
crc kubenswrapper[4967]: E1121 16:38:14.443621 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="705ba794-4a20-4ebe-bc4b-e9f448b48fab" containerName="extract-content" Nov 21 16:38:14 crc kubenswrapper[4967]: I1121 16:38:14.443644 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="705ba794-4a20-4ebe-bc4b-e9f448b48fab" containerName="extract-content" Nov 21 16:38:14 crc kubenswrapper[4967]: E1121 16:38:14.443664 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="705ba794-4a20-4ebe-bc4b-e9f448b48fab" containerName="extract-utilities" Nov 21 16:38:14 crc kubenswrapper[4967]: I1121 16:38:14.443673 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="705ba794-4a20-4ebe-bc4b-e9f448b48fab" containerName="extract-utilities" Nov 21 16:38:14 crc kubenswrapper[4967]: E1121 16:38:14.443711 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="705ba794-4a20-4ebe-bc4b-e9f448b48fab" containerName="registry-server" Nov 21 16:38:14 crc kubenswrapper[4967]: I1121 16:38:14.443720 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="705ba794-4a20-4ebe-bc4b-e9f448b48fab" containerName="registry-server" Nov 21 16:38:14 crc kubenswrapper[4967]: I1121 16:38:14.444023 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="705ba794-4a20-4ebe-bc4b-e9f448b48fab" containerName="registry-server" Nov 21 16:38:14 crc kubenswrapper[4967]: I1121 16:38:14.446334 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r55t5" Nov 21 16:38:14 crc kubenswrapper[4967]: I1121 16:38:14.454947 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-r55t5"] Nov 21 16:38:14 crc kubenswrapper[4967]: I1121 16:38:14.577298 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c91c84a-50b8-4a1f-8b36-c1db87735190-catalog-content\") pod \"redhat-marketplace-r55t5\" (UID: \"3c91c84a-50b8-4a1f-8b36-c1db87735190\") " pod="openshift-marketplace/redhat-marketplace-r55t5" Nov 21 16:38:14 crc kubenswrapper[4967]: I1121 16:38:14.577460 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vjn29\" (UniqueName: \"kubernetes.io/projected/3c91c84a-50b8-4a1f-8b36-c1db87735190-kube-api-access-vjn29\") pod \"redhat-marketplace-r55t5\" (UID: \"3c91c84a-50b8-4a1f-8b36-c1db87735190\") " pod="openshift-marketplace/redhat-marketplace-r55t5" Nov 21 16:38:14 crc kubenswrapper[4967]: I1121 16:38:14.577544 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c91c84a-50b8-4a1f-8b36-c1db87735190-utilities\") pod \"redhat-marketplace-r55t5\" (UID: \"3c91c84a-50b8-4a1f-8b36-c1db87735190\") " pod="openshift-marketplace/redhat-marketplace-r55t5" Nov 21 16:38:14 crc kubenswrapper[4967]: I1121 16:38:14.680927 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vjn29\" (UniqueName: \"kubernetes.io/projected/3c91c84a-50b8-4a1f-8b36-c1db87735190-kube-api-access-vjn29\") pod \"redhat-marketplace-r55t5\" (UID: \"3c91c84a-50b8-4a1f-8b36-c1db87735190\") " pod="openshift-marketplace/redhat-marketplace-r55t5" Nov 21 16:38:14 crc kubenswrapper[4967]: I1121 16:38:14.681122 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/3c91c84a-50b8-4a1f-8b36-c1db87735190-utilities\") pod \"redhat-marketplace-r55t5\" (UID: \"3c91c84a-50b8-4a1f-8b36-c1db87735190\") " pod="openshift-marketplace/redhat-marketplace-r55t5" Nov 21 16:38:14 crc kubenswrapper[4967]: I1121 16:38:14.681333 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c91c84a-50b8-4a1f-8b36-c1db87735190-catalog-content\") pod \"redhat-marketplace-r55t5\" (UID: \"3c91c84a-50b8-4a1f-8b36-c1db87735190\") " pod="openshift-marketplace/redhat-marketplace-r55t5" Nov 21 16:38:14 crc kubenswrapper[4967]: I1121 16:38:14.681816 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c91c84a-50b8-4a1f-8b36-c1db87735190-utilities\") pod \"redhat-marketplace-r55t5\" (UID: \"3c91c84a-50b8-4a1f-8b36-c1db87735190\") " pod="openshift-marketplace/redhat-marketplace-r55t5" Nov 21 16:38:14 crc kubenswrapper[4967]: I1121 16:38:14.681971 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c91c84a-50b8-4a1f-8b36-c1db87735190-catalog-content\") pod \"redhat-marketplace-r55t5\" (UID: \"3c91c84a-50b8-4a1f-8b36-c1db87735190\") " pod="openshift-marketplace/redhat-marketplace-r55t5" Nov 21 16:38:14 crc kubenswrapper[4967]: I1121 16:38:14.717471 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vjn29\" (UniqueName: \"kubernetes.io/projected/3c91c84a-50b8-4a1f-8b36-c1db87735190-kube-api-access-vjn29\") pod \"redhat-marketplace-r55t5\" (UID: \"3c91c84a-50b8-4a1f-8b36-c1db87735190\") " pod="openshift-marketplace/redhat-marketplace-r55t5" Nov 21 16:38:14 crc kubenswrapper[4967]: I1121 16:38:14.775251 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r55t5" Nov 21 16:38:15 crc kubenswrapper[4967]: I1121 16:38:15.304302 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-r55t5"] Nov 21 16:38:15 crc kubenswrapper[4967]: I1121 16:38:15.828744 4967 generic.go:334] "Generic (PLEG): container finished" podID="3c91c84a-50b8-4a1f-8b36-c1db87735190" containerID="1ca4ccaef1f6a0fb0fe8408cd13277d1281a557cfce0b80e3ef5a32e536fbd73" exitCode=0 Nov 21 16:38:15 crc kubenswrapper[4967]: I1121 16:38:15.829281 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r55t5" event={"ID":"3c91c84a-50b8-4a1f-8b36-c1db87735190","Type":"ContainerDied","Data":"1ca4ccaef1f6a0fb0fe8408cd13277d1281a557cfce0b80e3ef5a32e536fbd73"} Nov 21 16:38:15 crc kubenswrapper[4967]: I1121 16:38:15.829364 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r55t5" event={"ID":"3c91c84a-50b8-4a1f-8b36-c1db87735190","Type":"ContainerStarted","Data":"2c220f193d452122345e12e908be2b528ad6006d1bed0427bcafed7f141927f3"} Nov 21 16:38:17 crc kubenswrapper[4967]: I1121 16:38:17.865135 4967 generic.go:334] "Generic (PLEG): container finished" podID="3c91c84a-50b8-4a1f-8b36-c1db87735190" containerID="632ce834f5926a1e77ed4d0ed7e0f6dded387821e60ef04a920ddfccaaa2171b" exitCode=0 Nov 21 16:38:17 crc kubenswrapper[4967]: I1121 16:38:17.865223 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r55t5" event={"ID":"3c91c84a-50b8-4a1f-8b36-c1db87735190","Type":"ContainerDied","Data":"632ce834f5926a1e77ed4d0ed7e0f6dded387821e60ef04a920ddfccaaa2171b"} Nov 21 16:38:18 crc kubenswrapper[4967]: I1121 16:38:18.883191 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r55t5" event={"ID":"3c91c84a-50b8-4a1f-8b36-c1db87735190","Type":"ContainerStarted","Data":"e836046d4dc8f04de7cdbdf528bcf0bfb77b9a555010559bba45c7ddb6960d8f"} Nov 21 16:38:18 crc kubenswrapper[4967]: I1121 16:38:18.912061 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-r55t5" podStartSLOduration=2.462820809 podStartE2EDuration="4.912031703s" podCreationTimestamp="2025-11-21 16:38:14 +0000 UTC" firstStartedPulling="2025-11-21 16:38:15.83602906 +0000 UTC m=+3784.094550068" lastFinishedPulling="2025-11-21 16:38:18.285239954 +0000 UTC m=+3786.543760962" observedRunningTime="2025-11-21 16:38:18.901129192 +0000 UTC m=+3787.159650200" watchObservedRunningTime="2025-11-21 16:38:18.912031703 +0000 UTC m=+3787.170552711" Nov 21 16:38:20 crc kubenswrapper[4967]: I1121 16:38:20.538022 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:38:20 crc kubenswrapper[4967]: E1121 16:38:20.540488 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:38:24 crc kubenswrapper[4967]: I1121 16:38:24.776367 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-r55t5" Nov 21 16:38:24 
crc kubenswrapper[4967]: I1121 16:38:24.777207 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-r55t5" Nov 21 16:38:24 crc kubenswrapper[4967]: I1121 16:38:24.854534 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-r55t5" Nov 21 16:38:25 crc kubenswrapper[4967]: I1121 16:38:25.100570 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-r55t5" Nov 21 16:38:25 crc kubenswrapper[4967]: I1121 16:38:25.221381 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-r55t5"] Nov 21 16:38:26 crc kubenswrapper[4967]: I1121 16:38:26.980613 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-r55t5" podUID="3c91c84a-50b8-4a1f-8b36-c1db87735190" containerName="registry-server" containerID="cri-o://e836046d4dc8f04de7cdbdf528bcf0bfb77b9a555010559bba45c7ddb6960d8f" gracePeriod=2 Nov 21 16:38:27 crc kubenswrapper[4967]: I1121 16:38:27.511784 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r55t5" Nov 21 16:38:27 crc kubenswrapper[4967]: I1121 16:38:27.661837 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c91c84a-50b8-4a1f-8b36-c1db87735190-catalog-content\") pod \"3c91c84a-50b8-4a1f-8b36-c1db87735190\" (UID: \"3c91c84a-50b8-4a1f-8b36-c1db87735190\") " Nov 21 16:38:27 crc kubenswrapper[4967]: I1121 16:38:27.662077 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vjn29\" (UniqueName: \"kubernetes.io/projected/3c91c84a-50b8-4a1f-8b36-c1db87735190-kube-api-access-vjn29\") pod \"3c91c84a-50b8-4a1f-8b36-c1db87735190\" (UID: \"3c91c84a-50b8-4a1f-8b36-c1db87735190\") " Nov 21 16:38:27 crc kubenswrapper[4967]: I1121 16:38:27.662180 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c91c84a-50b8-4a1f-8b36-c1db87735190-utilities\") pod \"3c91c84a-50b8-4a1f-8b36-c1db87735190\" (UID: \"3c91c84a-50b8-4a1f-8b36-c1db87735190\") " Nov 21 16:38:27 crc kubenswrapper[4967]: I1121 16:38:27.663224 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c91c84a-50b8-4a1f-8b36-c1db87735190-utilities" (OuterVolumeSpecName: "utilities") pod "3c91c84a-50b8-4a1f-8b36-c1db87735190" (UID: "3c91c84a-50b8-4a1f-8b36-c1db87735190"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:38:27 crc kubenswrapper[4967]: I1121 16:38:27.668594 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c91c84a-50b8-4a1f-8b36-c1db87735190-kube-api-access-vjn29" (OuterVolumeSpecName: "kube-api-access-vjn29") pod "3c91c84a-50b8-4a1f-8b36-c1db87735190" (UID: "3c91c84a-50b8-4a1f-8b36-c1db87735190"). InnerVolumeSpecName "kube-api-access-vjn29". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:38:27 crc kubenswrapper[4967]: I1121 16:38:27.680518 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c91c84a-50b8-4a1f-8b36-c1db87735190-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3c91c84a-50b8-4a1f-8b36-c1db87735190" (UID: "3c91c84a-50b8-4a1f-8b36-c1db87735190"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:38:27 crc kubenswrapper[4967]: I1121 16:38:27.765292 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3c91c84a-50b8-4a1f-8b36-c1db87735190-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 16:38:27 crc kubenswrapper[4967]: I1121 16:38:27.765357 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3c91c84a-50b8-4a1f-8b36-c1db87735190-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 16:38:27 crc kubenswrapper[4967]: I1121 16:38:27.765375 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vjn29\" (UniqueName: \"kubernetes.io/projected/3c91c84a-50b8-4a1f-8b36-c1db87735190-kube-api-access-vjn29\") on node \"crc\" DevicePath \"\"" Nov 21 16:38:28 crc kubenswrapper[4967]: I1121 16:38:28.002367 4967 generic.go:334] "Generic (PLEG): container finished" podID="3c91c84a-50b8-4a1f-8b36-c1db87735190" containerID="e836046d4dc8f04de7cdbdf528bcf0bfb77b9a555010559bba45c7ddb6960d8f" exitCode=0 Nov 21 16:38:28 crc kubenswrapper[4967]: I1121 16:38:28.002413 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r55t5" event={"ID":"3c91c84a-50b8-4a1f-8b36-c1db87735190","Type":"ContainerDied","Data":"e836046d4dc8f04de7cdbdf528bcf0bfb77b9a555010559bba45c7ddb6960d8f"} Nov 21 16:38:28 crc kubenswrapper[4967]: I1121 16:38:28.002422 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r55t5" Nov 21 16:38:28 crc kubenswrapper[4967]: I1121 16:38:28.002444 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r55t5" event={"ID":"3c91c84a-50b8-4a1f-8b36-c1db87735190","Type":"ContainerDied","Data":"2c220f193d452122345e12e908be2b528ad6006d1bed0427bcafed7f141927f3"} Nov 21 16:38:28 crc kubenswrapper[4967]: I1121 16:38:28.002462 4967 scope.go:117] "RemoveContainer" containerID="e836046d4dc8f04de7cdbdf528bcf0bfb77b9a555010559bba45c7ddb6960d8f" Nov 21 16:38:28 crc kubenswrapper[4967]: I1121 16:38:28.030506 4967 scope.go:117] "RemoveContainer" containerID="632ce834f5926a1e77ed4d0ed7e0f6dded387821e60ef04a920ddfccaaa2171b" Nov 21 16:38:28 crc kubenswrapper[4967]: I1121 16:38:28.049612 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-r55t5"] Nov 21 16:38:28 crc kubenswrapper[4967]: I1121 16:38:28.065577 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-r55t5"] Nov 21 16:38:28 crc kubenswrapper[4967]: I1121 16:38:28.075641 4967 scope.go:117] "RemoveContainer" containerID="1ca4ccaef1f6a0fb0fe8408cd13277d1281a557cfce0b80e3ef5a32e536fbd73" Nov 21 16:38:28 crc kubenswrapper[4967]: I1121 16:38:28.118010 4967 scope.go:117] "RemoveContainer" containerID="e836046d4dc8f04de7cdbdf528bcf0bfb77b9a555010559bba45c7ddb6960d8f" Nov 21 16:38:28 crc kubenswrapper[4967]: E1121 16:38:28.118543 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e836046d4dc8f04de7cdbdf528bcf0bfb77b9a555010559bba45c7ddb6960d8f\": container with ID starting with e836046d4dc8f04de7cdbdf528bcf0bfb77b9a555010559bba45c7ddb6960d8f not found: ID does not exist" containerID="e836046d4dc8f04de7cdbdf528bcf0bfb77b9a555010559bba45c7ddb6960d8f" Nov 21 16:38:28 crc kubenswrapper[4967]: I1121 16:38:28.118601 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e836046d4dc8f04de7cdbdf528bcf0bfb77b9a555010559bba45c7ddb6960d8f"} err="failed to get container status \"e836046d4dc8f04de7cdbdf528bcf0bfb77b9a555010559bba45c7ddb6960d8f\": rpc error: code = NotFound desc = could not find container \"e836046d4dc8f04de7cdbdf528bcf0bfb77b9a555010559bba45c7ddb6960d8f\": container with ID starting with e836046d4dc8f04de7cdbdf528bcf0bfb77b9a555010559bba45c7ddb6960d8f not found: ID does not exist" Nov 21 16:38:28 crc kubenswrapper[4967]: I1121 16:38:28.118637 4967 scope.go:117] "RemoveContainer" containerID="632ce834f5926a1e77ed4d0ed7e0f6dded387821e60ef04a920ddfccaaa2171b" Nov 21 16:38:28 crc kubenswrapper[4967]: E1121 16:38:28.119006 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"632ce834f5926a1e77ed4d0ed7e0f6dded387821e60ef04a920ddfccaaa2171b\": container with ID starting with 632ce834f5926a1e77ed4d0ed7e0f6dded387821e60ef04a920ddfccaaa2171b not found: ID does not exist" containerID="632ce834f5926a1e77ed4d0ed7e0f6dded387821e60ef04a920ddfccaaa2171b" Nov 21 16:38:28 crc kubenswrapper[4967]: I1121 16:38:28.119038 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"632ce834f5926a1e77ed4d0ed7e0f6dded387821e60ef04a920ddfccaaa2171b"} err="failed to get container status \"632ce834f5926a1e77ed4d0ed7e0f6dded387821e60ef04a920ddfccaaa2171b\": rpc error: code = NotFound desc = could not find 
container \"632ce834f5926a1e77ed4d0ed7e0f6dded387821e60ef04a920ddfccaaa2171b\": container with ID starting with 632ce834f5926a1e77ed4d0ed7e0f6dded387821e60ef04a920ddfccaaa2171b not found: ID does not exist" Nov 21 16:38:28 crc kubenswrapper[4967]: I1121 16:38:28.119065 4967 scope.go:117] "RemoveContainer" containerID="1ca4ccaef1f6a0fb0fe8408cd13277d1281a557cfce0b80e3ef5a32e536fbd73" Nov 21 16:38:28 crc kubenswrapper[4967]: E1121 16:38:28.119338 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ca4ccaef1f6a0fb0fe8408cd13277d1281a557cfce0b80e3ef5a32e536fbd73\": container with ID starting with 1ca4ccaef1f6a0fb0fe8408cd13277d1281a557cfce0b80e3ef5a32e536fbd73 not found: ID does not exist" containerID="1ca4ccaef1f6a0fb0fe8408cd13277d1281a557cfce0b80e3ef5a32e536fbd73" Nov 21 16:38:28 crc kubenswrapper[4967]: I1121 16:38:28.119366 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ca4ccaef1f6a0fb0fe8408cd13277d1281a557cfce0b80e3ef5a32e536fbd73"} err="failed to get container status \"1ca4ccaef1f6a0fb0fe8408cd13277d1281a557cfce0b80e3ef5a32e536fbd73\": rpc error: code = NotFound desc = could not find container \"1ca4ccaef1f6a0fb0fe8408cd13277d1281a557cfce0b80e3ef5a32e536fbd73\": container with ID starting with 1ca4ccaef1f6a0fb0fe8408cd13277d1281a557cfce0b80e3ef5a32e536fbd73 not found: ID does not exist" Nov 21 16:38:28 crc kubenswrapper[4967]: I1121 16:38:28.549758 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c91c84a-50b8-4a1f-8b36-c1db87735190" path="/var/lib/kubelet/pods/3c91c84a-50b8-4a1f-8b36-c1db87735190/volumes" Nov 21 16:38:31 crc kubenswrapper[4967]: I1121 16:38:31.537222 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:38:31 crc kubenswrapper[4967]: E1121 16:38:31.539241 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:38:43 crc kubenswrapper[4967]: I1121 16:38:43.537267 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:38:43 crc kubenswrapper[4967]: E1121 16:38:43.538222 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:38:55 crc kubenswrapper[4967]: I1121 16:38:55.536092 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:38:55 crc kubenswrapper[4967]: E1121 16:38:55.536801 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:39:09 crc kubenswrapper[4967]: I1121 16:39:09.536734 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:39:09 crc kubenswrapper[4967]: E1121 16:39:09.537463 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:39:24 crc kubenswrapper[4967]: I1121 16:39:24.536195 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:39:24 crc kubenswrapper[4967]: E1121 16:39:24.536969 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:39:35 crc kubenswrapper[4967]: I1121 16:39:35.536158 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:39:35 crc kubenswrapper[4967]: E1121 16:39:35.537532 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:39:46 crc kubenswrapper[4967]: I1121 16:39:46.536911 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:39:46 crc kubenswrapper[4967]: E1121 16:39:46.538446 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:40:00 crc kubenswrapper[4967]: I1121 16:40:00.536053 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:40:00 crc kubenswrapper[4967]: E1121 16:40:00.536854 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" 
podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:40:12 crc kubenswrapper[4967]: I1121 16:40:12.546637 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:40:12 crc kubenswrapper[4967]: E1121 16:40:12.547680 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:40:23 crc kubenswrapper[4967]: I1121 16:40:23.536762 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:40:23 crc kubenswrapper[4967]: E1121 16:40:23.537768 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:40:34 crc kubenswrapper[4967]: I1121 16:40:34.536125 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:40:34 crc kubenswrapper[4967]: E1121 16:40:34.537133 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:40:47 crc kubenswrapper[4967]: I1121 16:40:47.537145 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:40:47 crc kubenswrapper[4967]: E1121 16:40:47.538123 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:40:59 crc kubenswrapper[4967]: I1121 16:40:59.537236 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:40:59 crc kubenswrapper[4967]: E1121 16:40:59.538180 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:41:10 crc kubenswrapper[4967]: I1121 16:41:10.536270 4967 scope.go:117] "RemoveContainer" 
containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:41:10 crc kubenswrapper[4967]: E1121 16:41:10.537052 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:41:25 crc kubenswrapper[4967]: I1121 16:41:25.172460 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rc8lg"] Nov 21 16:41:25 crc kubenswrapper[4967]: E1121 16:41:25.173745 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c91c84a-50b8-4a1f-8b36-c1db87735190" containerName="extract-utilities" Nov 21 16:41:25 crc kubenswrapper[4967]: I1121 16:41:25.173764 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c91c84a-50b8-4a1f-8b36-c1db87735190" containerName="extract-utilities" Nov 21 16:41:25 crc kubenswrapper[4967]: E1121 16:41:25.173841 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c91c84a-50b8-4a1f-8b36-c1db87735190" containerName="registry-server" Nov 21 16:41:25 crc kubenswrapper[4967]: I1121 16:41:25.173851 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c91c84a-50b8-4a1f-8b36-c1db87735190" containerName="registry-server" Nov 21 16:41:25 crc kubenswrapper[4967]: E1121 16:41:25.173936 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c91c84a-50b8-4a1f-8b36-c1db87735190" containerName="extract-content" Nov 21 16:41:25 crc kubenswrapper[4967]: I1121 16:41:25.173946 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c91c84a-50b8-4a1f-8b36-c1db87735190" containerName="extract-content" Nov 21 16:41:25 crc kubenswrapper[4967]: I1121 16:41:25.174633 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c91c84a-50b8-4a1f-8b36-c1db87735190" containerName="registry-server" Nov 21 16:41:25 crc kubenswrapper[4967]: I1121 16:41:25.176995 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rc8lg" Nov 21 16:41:25 crc kubenswrapper[4967]: I1121 16:41:25.184271 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rc8lg"] Nov 21 16:41:25 crc kubenswrapper[4967]: I1121 16:41:25.338125 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83b93f3c-dfc1-464d-8340-67fcace2081e-utilities\") pod \"certified-operators-rc8lg\" (UID: \"83b93f3c-dfc1-464d-8340-67fcace2081e\") " pod="openshift-marketplace/certified-operators-rc8lg" Nov 21 16:41:25 crc kubenswrapper[4967]: I1121 16:41:25.338280 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83b93f3c-dfc1-464d-8340-67fcace2081e-catalog-content\") pod \"certified-operators-rc8lg\" (UID: \"83b93f3c-dfc1-464d-8340-67fcace2081e\") " pod="openshift-marketplace/certified-operators-rc8lg" Nov 21 16:41:25 crc kubenswrapper[4967]: I1121 16:41:25.338569 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4kjk\" (UniqueName: \"kubernetes.io/projected/83b93f3c-dfc1-464d-8340-67fcace2081e-kube-api-access-v4kjk\") pod \"certified-operators-rc8lg\" (UID: \"83b93f3c-dfc1-464d-8340-67fcace2081e\") " pod="openshift-marketplace/certified-operators-rc8lg" Nov 21 16:41:25 crc kubenswrapper[4967]: I1121 16:41:25.440752 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83b93f3c-dfc1-464d-8340-67fcace2081e-utilities\") pod \"certified-operators-rc8lg\" (UID: \"83b93f3c-dfc1-464d-8340-67fcace2081e\") " pod="openshift-marketplace/certified-operators-rc8lg" Nov 21 16:41:25 crc kubenswrapper[4967]: I1121 16:41:25.440828 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83b93f3c-dfc1-464d-8340-67fcace2081e-catalog-content\") pod \"certified-operators-rc8lg\" (UID: \"83b93f3c-dfc1-464d-8340-67fcace2081e\") " pod="openshift-marketplace/certified-operators-rc8lg" Nov 21 16:41:25 crc kubenswrapper[4967]: I1121 16:41:25.440939 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4kjk\" (UniqueName: \"kubernetes.io/projected/83b93f3c-dfc1-464d-8340-67fcace2081e-kube-api-access-v4kjk\") pod \"certified-operators-rc8lg\" (UID: \"83b93f3c-dfc1-464d-8340-67fcace2081e\") " pod="openshift-marketplace/certified-operators-rc8lg" Nov 21 16:41:25 crc kubenswrapper[4967]: I1121 16:41:25.441417 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83b93f3c-dfc1-464d-8340-67fcace2081e-utilities\") pod \"certified-operators-rc8lg\" (UID: \"83b93f3c-dfc1-464d-8340-67fcace2081e\") " pod="openshift-marketplace/certified-operators-rc8lg" Nov 21 16:41:25 crc kubenswrapper[4967]: I1121 16:41:25.441438 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83b93f3c-dfc1-464d-8340-67fcace2081e-catalog-content\") pod \"certified-operators-rc8lg\" (UID: \"83b93f3c-dfc1-464d-8340-67fcace2081e\") " pod="openshift-marketplace/certified-operators-rc8lg" Nov 21 16:41:25 crc kubenswrapper[4967]: I1121 16:41:25.471245 4967 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-v4kjk\" (UniqueName: \"kubernetes.io/projected/83b93f3c-dfc1-464d-8340-67fcace2081e-kube-api-access-v4kjk\") pod \"certified-operators-rc8lg\" (UID: \"83b93f3c-dfc1-464d-8340-67fcace2081e\") " pod="openshift-marketplace/certified-operators-rc8lg" Nov 21 16:41:25 crc kubenswrapper[4967]: I1121 16:41:25.499830 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rc8lg" Nov 21 16:41:25 crc kubenswrapper[4967]: I1121 16:41:25.536936 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:41:25 crc kubenswrapper[4967]: E1121 16:41:25.537598 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:41:26 crc kubenswrapper[4967]: I1121 16:41:26.063943 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rc8lg"] Nov 21 16:41:26 crc kubenswrapper[4967]: I1121 16:41:26.929457 4967 generic.go:334] "Generic (PLEG): container finished" podID="83b93f3c-dfc1-464d-8340-67fcace2081e" containerID="5d9b82b6bd5bdeb2899dbd7464a3cb6055a09e6d63220324f361442d3ea7795b" exitCode=0 Nov 21 16:41:26 crc kubenswrapper[4967]: I1121 16:41:26.930910 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rc8lg" event={"ID":"83b93f3c-dfc1-464d-8340-67fcace2081e","Type":"ContainerDied","Data":"5d9b82b6bd5bdeb2899dbd7464a3cb6055a09e6d63220324f361442d3ea7795b"} Nov 21 16:41:26 crc kubenswrapper[4967]: I1121 16:41:26.930974 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rc8lg" event={"ID":"83b93f3c-dfc1-464d-8340-67fcace2081e","Type":"ContainerStarted","Data":"8a49e61475e5cdce3cd968cf1eaab74b020ce7ab7f75e45164d40fa9dcb7bba9"} Nov 21 16:41:28 crc kubenswrapper[4967]: I1121 16:41:28.954492 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rc8lg" event={"ID":"83b93f3c-dfc1-464d-8340-67fcace2081e","Type":"ContainerStarted","Data":"8b2e52441f02296c6525b19006cf8c98e810f15a14caf41d5bad2d9dafe58974"} Nov 21 16:41:30 crc kubenswrapper[4967]: I1121 16:41:30.978804 4967 generic.go:334] "Generic (PLEG): container finished" podID="83b93f3c-dfc1-464d-8340-67fcace2081e" containerID="8b2e52441f02296c6525b19006cf8c98e810f15a14caf41d5bad2d9dafe58974" exitCode=0 Nov 21 16:41:30 crc kubenswrapper[4967]: I1121 16:41:30.978842 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rc8lg" event={"ID":"83b93f3c-dfc1-464d-8340-67fcace2081e","Type":"ContainerDied","Data":"8b2e52441f02296c6525b19006cf8c98e810f15a14caf41d5bad2d9dafe58974"} Nov 21 16:41:31 crc kubenswrapper[4967]: I1121 16:41:31.995577 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rc8lg" event={"ID":"83b93f3c-dfc1-464d-8340-67fcace2081e","Type":"ContainerStarted","Data":"c670b1c9721d71f73099cfc567b86d6b661360b14fd2e02c0fe33a6684f408b0"} Nov 21 16:41:32 crc kubenswrapper[4967]: I1121 16:41:32.026942 4967 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rc8lg" podStartSLOduration=2.235985702 podStartE2EDuration="7.026916903s" podCreationTimestamp="2025-11-21 16:41:25 +0000 UTC" firstStartedPulling="2025-11-21 16:41:26.932418527 +0000 UTC m=+3975.190939535" lastFinishedPulling="2025-11-21 16:41:31.723349728 +0000 UTC m=+3979.981870736" observedRunningTime="2025-11-21 16:41:32.023853715 +0000 UTC m=+3980.282374743" watchObservedRunningTime="2025-11-21 16:41:32.026916903 +0000 UTC m=+3980.285437911" Nov 21 16:41:35 crc kubenswrapper[4967]: I1121 16:41:35.500883 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rc8lg" Nov 21 16:41:35 crc kubenswrapper[4967]: I1121 16:41:35.501424 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rc8lg" Nov 21 16:41:35 crc kubenswrapper[4967]: I1121 16:41:35.629987 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rc8lg" Nov 21 16:41:36 crc kubenswrapper[4967]: I1121 16:41:36.079505 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rc8lg" Nov 21 16:41:36 crc kubenswrapper[4967]: I1121 16:41:36.133038 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rc8lg"] Nov 21 16:41:38 crc kubenswrapper[4967]: I1121 16:41:38.049972 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-rc8lg" podUID="83b93f3c-dfc1-464d-8340-67fcace2081e" containerName="registry-server" containerID="cri-o://c670b1c9721d71f73099cfc567b86d6b661360b14fd2e02c0fe33a6684f408b0" gracePeriod=2 Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.034146 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rc8lg" Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.064910 4967 generic.go:334] "Generic (PLEG): container finished" podID="83b93f3c-dfc1-464d-8340-67fcace2081e" containerID="c670b1c9721d71f73099cfc567b86d6b661360b14fd2e02c0fe33a6684f408b0" exitCode=0 Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.064995 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rc8lg" event={"ID":"83b93f3c-dfc1-464d-8340-67fcace2081e","Type":"ContainerDied","Data":"c670b1c9721d71f73099cfc567b86d6b661360b14fd2e02c0fe33a6684f408b0"} Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.065027 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rc8lg" event={"ID":"83b93f3c-dfc1-464d-8340-67fcace2081e","Type":"ContainerDied","Data":"8a49e61475e5cdce3cd968cf1eaab74b020ce7ab7f75e45164d40fa9dcb7bba9"} Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.065048 4967 scope.go:117] "RemoveContainer" containerID="c670b1c9721d71f73099cfc567b86d6b661360b14fd2e02c0fe33a6684f408b0" Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.065191 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rc8lg" Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.097022 4967 scope.go:117] "RemoveContainer" containerID="8b2e52441f02296c6525b19006cf8c98e810f15a14caf41d5bad2d9dafe58974" Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.136096 4967 scope.go:117] "RemoveContainer" containerID="5d9b82b6bd5bdeb2899dbd7464a3cb6055a09e6d63220324f361442d3ea7795b" Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.181783 4967 scope.go:117] "RemoveContainer" containerID="c670b1c9721d71f73099cfc567b86d6b661360b14fd2e02c0fe33a6684f408b0" Nov 21 16:41:39 crc kubenswrapper[4967]: E1121 16:41:39.182268 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c670b1c9721d71f73099cfc567b86d6b661360b14fd2e02c0fe33a6684f408b0\": container with ID starting with c670b1c9721d71f73099cfc567b86d6b661360b14fd2e02c0fe33a6684f408b0 not found: ID does not exist" containerID="c670b1c9721d71f73099cfc567b86d6b661360b14fd2e02c0fe33a6684f408b0" Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.182323 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c670b1c9721d71f73099cfc567b86d6b661360b14fd2e02c0fe33a6684f408b0"} err="failed to get container status \"c670b1c9721d71f73099cfc567b86d6b661360b14fd2e02c0fe33a6684f408b0\": rpc error: code = NotFound desc = could not find container \"c670b1c9721d71f73099cfc567b86d6b661360b14fd2e02c0fe33a6684f408b0\": container with ID starting with c670b1c9721d71f73099cfc567b86d6b661360b14fd2e02c0fe33a6684f408b0 not found: ID does not exist" Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.182352 4967 scope.go:117] "RemoveContainer" containerID="8b2e52441f02296c6525b19006cf8c98e810f15a14caf41d5bad2d9dafe58974" Nov 21 16:41:39 crc kubenswrapper[4967]: E1121 16:41:39.182732 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b2e52441f02296c6525b19006cf8c98e810f15a14caf41d5bad2d9dafe58974\": container with ID starting with 8b2e52441f02296c6525b19006cf8c98e810f15a14caf41d5bad2d9dafe58974 not found: ID does not exist" containerID="8b2e52441f02296c6525b19006cf8c98e810f15a14caf41d5bad2d9dafe58974" Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.182766 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b2e52441f02296c6525b19006cf8c98e810f15a14caf41d5bad2d9dafe58974"} err="failed to get container status \"8b2e52441f02296c6525b19006cf8c98e810f15a14caf41d5bad2d9dafe58974\": rpc error: code = NotFound desc = could not find container \"8b2e52441f02296c6525b19006cf8c98e810f15a14caf41d5bad2d9dafe58974\": container with ID starting with 8b2e52441f02296c6525b19006cf8c98e810f15a14caf41d5bad2d9dafe58974 not found: ID does not exist" Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.182786 4967 scope.go:117] "RemoveContainer" containerID="5d9b82b6bd5bdeb2899dbd7464a3cb6055a09e6d63220324f361442d3ea7795b" Nov 21 16:41:39 crc kubenswrapper[4967]: E1121 16:41:39.183059 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d9b82b6bd5bdeb2899dbd7464a3cb6055a09e6d63220324f361442d3ea7795b\": container with ID starting with 5d9b82b6bd5bdeb2899dbd7464a3cb6055a09e6d63220324f361442d3ea7795b not found: ID does not exist" containerID="5d9b82b6bd5bdeb2899dbd7464a3cb6055a09e6d63220324f361442d3ea7795b" 
Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.183086 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d9b82b6bd5bdeb2899dbd7464a3cb6055a09e6d63220324f361442d3ea7795b"} err="failed to get container status \"5d9b82b6bd5bdeb2899dbd7464a3cb6055a09e6d63220324f361442d3ea7795b\": rpc error: code = NotFound desc = could not find container \"5d9b82b6bd5bdeb2899dbd7464a3cb6055a09e6d63220324f361442d3ea7795b\": container with ID starting with 5d9b82b6bd5bdeb2899dbd7464a3cb6055a09e6d63220324f361442d3ea7795b not found: ID does not exist" Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.185036 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83b93f3c-dfc1-464d-8340-67fcace2081e-utilities\") pod \"83b93f3c-dfc1-464d-8340-67fcace2081e\" (UID: \"83b93f3c-dfc1-464d-8340-67fcace2081e\") " Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.185083 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83b93f3c-dfc1-464d-8340-67fcace2081e-catalog-content\") pod \"83b93f3c-dfc1-464d-8340-67fcace2081e\" (UID: \"83b93f3c-dfc1-464d-8340-67fcace2081e\") " Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.185302 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4kjk\" (UniqueName: \"kubernetes.io/projected/83b93f3c-dfc1-464d-8340-67fcace2081e-kube-api-access-v4kjk\") pod \"83b93f3c-dfc1-464d-8340-67fcace2081e\" (UID: \"83b93f3c-dfc1-464d-8340-67fcace2081e\") " Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.186327 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83b93f3c-dfc1-464d-8340-67fcace2081e-utilities" (OuterVolumeSpecName: "utilities") pod "83b93f3c-dfc1-464d-8340-67fcace2081e" (UID: "83b93f3c-dfc1-464d-8340-67fcace2081e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.192079 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83b93f3c-dfc1-464d-8340-67fcace2081e-kube-api-access-v4kjk" (OuterVolumeSpecName: "kube-api-access-v4kjk") pod "83b93f3c-dfc1-464d-8340-67fcace2081e" (UID: "83b93f3c-dfc1-464d-8340-67fcace2081e"). InnerVolumeSpecName "kube-api-access-v4kjk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.233689 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83b93f3c-dfc1-464d-8340-67fcace2081e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "83b93f3c-dfc1-464d-8340-67fcace2081e" (UID: "83b93f3c-dfc1-464d-8340-67fcace2081e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.288235 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4kjk\" (UniqueName: \"kubernetes.io/projected/83b93f3c-dfc1-464d-8340-67fcace2081e-kube-api-access-v4kjk\") on node \"crc\" DevicePath \"\"" Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.288275 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83b93f3c-dfc1-464d-8340-67fcace2081e-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.288286 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83b93f3c-dfc1-464d-8340-67fcace2081e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.398773 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rc8lg"] Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.408522 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-rc8lg"] Nov 21 16:41:39 crc kubenswrapper[4967]: I1121 16:41:39.537516 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:41:39 crc kubenswrapper[4967]: E1121 16:41:39.538058 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:41:40 crc kubenswrapper[4967]: I1121 16:41:40.552349 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83b93f3c-dfc1-464d-8340-67fcace2081e" path="/var/lib/kubelet/pods/83b93f3c-dfc1-464d-8340-67fcace2081e/volumes" Nov 21 16:41:50 crc kubenswrapper[4967]: I1121 16:41:50.537104 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:41:50 crc kubenswrapper[4967]: E1121 16:41:50.537988 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:42:04 crc kubenswrapper[4967]: I1121 16:42:04.536634 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:42:04 crc kubenswrapper[4967]: E1121 16:42:04.538586 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:42:17 crc kubenswrapper[4967]: I1121 16:42:17.536618 4967 
scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:42:18 crc kubenswrapper[4967]: I1121 16:42:18.598787 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"ba972e3a53770a93442431b16b9ea59debb73599836dec99e6320827e6c01836"} Nov 21 16:42:52 crc kubenswrapper[4967]: I1121 16:42:52.781224 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wwqts"] Nov 21 16:42:52 crc kubenswrapper[4967]: E1121 16:42:52.782930 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83b93f3c-dfc1-464d-8340-67fcace2081e" containerName="registry-server" Nov 21 16:42:52 crc kubenswrapper[4967]: I1121 16:42:52.782951 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="83b93f3c-dfc1-464d-8340-67fcace2081e" containerName="registry-server" Nov 21 16:42:52 crc kubenswrapper[4967]: E1121 16:42:52.783003 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83b93f3c-dfc1-464d-8340-67fcace2081e" containerName="extract-utilities" Nov 21 16:42:52 crc kubenswrapper[4967]: I1121 16:42:52.783013 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="83b93f3c-dfc1-464d-8340-67fcace2081e" containerName="extract-utilities" Nov 21 16:42:52 crc kubenswrapper[4967]: E1121 16:42:52.783029 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83b93f3c-dfc1-464d-8340-67fcace2081e" containerName="extract-content" Nov 21 16:42:52 crc kubenswrapper[4967]: I1121 16:42:52.783037 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="83b93f3c-dfc1-464d-8340-67fcace2081e" containerName="extract-content" Nov 21 16:42:52 crc kubenswrapper[4967]: I1121 16:42:52.783365 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="83b93f3c-dfc1-464d-8340-67fcace2081e" containerName="registry-server" Nov 21 16:42:52 crc kubenswrapper[4967]: I1121 16:42:52.785265 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wwqts" Nov 21 16:42:52 crc kubenswrapper[4967]: I1121 16:42:52.801327 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wwqts"] Nov 21 16:42:52 crc kubenswrapper[4967]: I1121 16:42:52.871985 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c132c5a-9b2c-4f49-897c-047885ee3c69-utilities\") pod \"community-operators-wwqts\" (UID: \"0c132c5a-9b2c-4f49-897c-047885ee3c69\") " pod="openshift-marketplace/community-operators-wwqts" Nov 21 16:42:52 crc kubenswrapper[4967]: I1121 16:42:52.872195 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c132c5a-9b2c-4f49-897c-047885ee3c69-catalog-content\") pod \"community-operators-wwqts\" (UID: \"0c132c5a-9b2c-4f49-897c-047885ee3c69\") " pod="openshift-marketplace/community-operators-wwqts" Nov 21 16:42:52 crc kubenswrapper[4967]: I1121 16:42:52.872230 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmf5x\" (UniqueName: \"kubernetes.io/projected/0c132c5a-9b2c-4f49-897c-047885ee3c69-kube-api-access-zmf5x\") pod \"community-operators-wwqts\" (UID: \"0c132c5a-9b2c-4f49-897c-047885ee3c69\") " pod="openshift-marketplace/community-operators-wwqts" Nov 21 16:42:52 crc kubenswrapper[4967]: I1121 16:42:52.974188 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c132c5a-9b2c-4f49-897c-047885ee3c69-catalog-content\") pod \"community-operators-wwqts\" (UID: \"0c132c5a-9b2c-4f49-897c-047885ee3c69\") " pod="openshift-marketplace/community-operators-wwqts" Nov 21 16:42:52 crc kubenswrapper[4967]: I1121 16:42:52.974233 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmf5x\" (UniqueName: \"kubernetes.io/projected/0c132c5a-9b2c-4f49-897c-047885ee3c69-kube-api-access-zmf5x\") pod \"community-operators-wwqts\" (UID: \"0c132c5a-9b2c-4f49-897c-047885ee3c69\") " pod="openshift-marketplace/community-operators-wwqts" Nov 21 16:42:52 crc kubenswrapper[4967]: I1121 16:42:52.974418 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c132c5a-9b2c-4f49-897c-047885ee3c69-utilities\") pod \"community-operators-wwqts\" (UID: \"0c132c5a-9b2c-4f49-897c-047885ee3c69\") " pod="openshift-marketplace/community-operators-wwqts" Nov 21 16:42:52 crc kubenswrapper[4967]: I1121 16:42:52.974689 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c132c5a-9b2c-4f49-897c-047885ee3c69-catalog-content\") pod \"community-operators-wwqts\" (UID: \"0c132c5a-9b2c-4f49-897c-047885ee3c69\") " pod="openshift-marketplace/community-operators-wwqts" Nov 21 16:42:52 crc kubenswrapper[4967]: I1121 16:42:52.975071 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c132c5a-9b2c-4f49-897c-047885ee3c69-utilities\") pod \"community-operators-wwqts\" (UID: \"0c132c5a-9b2c-4f49-897c-047885ee3c69\") " pod="openshift-marketplace/community-operators-wwqts" Nov 21 16:42:53 crc kubenswrapper[4967]: I1121 16:42:53.809923 4967 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-zmf5x\" (UniqueName: \"kubernetes.io/projected/0c132c5a-9b2c-4f49-897c-047885ee3c69-kube-api-access-zmf5x\") pod \"community-operators-wwqts\" (UID: \"0c132c5a-9b2c-4f49-897c-047885ee3c69\") " pod="openshift-marketplace/community-operators-wwqts" Nov 21 16:42:54 crc kubenswrapper[4967]: I1121 16:42:54.008107 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wwqts" Nov 21 16:42:54 crc kubenswrapper[4967]: I1121 16:42:54.503868 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wwqts"] Nov 21 16:42:54 crc kubenswrapper[4967]: I1121 16:42:54.997861 4967 generic.go:334] "Generic (PLEG): container finished" podID="0c132c5a-9b2c-4f49-897c-047885ee3c69" containerID="8ad29a31ccf69e840247e5465b1fc3d412602c2fd885cc7e9e320a88ca19cb36" exitCode=0 Nov 21 16:42:54 crc kubenswrapper[4967]: I1121 16:42:54.997982 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wwqts" event={"ID":"0c132c5a-9b2c-4f49-897c-047885ee3c69","Type":"ContainerDied","Data":"8ad29a31ccf69e840247e5465b1fc3d412602c2fd885cc7e9e320a88ca19cb36"} Nov 21 16:42:54 crc kubenswrapper[4967]: I1121 16:42:54.998338 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wwqts" event={"ID":"0c132c5a-9b2c-4f49-897c-047885ee3c69","Type":"ContainerStarted","Data":"9ec98242615b29e5e92aca4e782b316296c30db54cf50b677f4219558595a354"} Nov 21 16:42:55 crc kubenswrapper[4967]: I1121 16:42:55.001358 4967 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 16:42:57 crc kubenswrapper[4967]: I1121 16:42:57.022624 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wwqts" event={"ID":"0c132c5a-9b2c-4f49-897c-047885ee3c69","Type":"ContainerStarted","Data":"57104df7a68f959aed4fbbf7586fc9a2f36ef28975272f194098e5fbe8c35030"} Nov 21 16:42:58 crc kubenswrapper[4967]: I1121 16:42:58.036841 4967 generic.go:334] "Generic (PLEG): container finished" podID="0c132c5a-9b2c-4f49-897c-047885ee3c69" containerID="57104df7a68f959aed4fbbf7586fc9a2f36ef28975272f194098e5fbe8c35030" exitCode=0 Nov 21 16:42:58 crc kubenswrapper[4967]: I1121 16:42:58.036967 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wwqts" event={"ID":"0c132c5a-9b2c-4f49-897c-047885ee3c69","Type":"ContainerDied","Data":"57104df7a68f959aed4fbbf7586fc9a2f36ef28975272f194098e5fbe8c35030"} Nov 21 16:42:59 crc kubenswrapper[4967]: I1121 16:42:59.052927 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wwqts" event={"ID":"0c132c5a-9b2c-4f49-897c-047885ee3c69","Type":"ContainerStarted","Data":"4d3deae780fa89854797b95212677558a469194d0af3afd9eeca7d63a49d980e"} Nov 21 16:42:59 crc kubenswrapper[4967]: I1121 16:42:59.082995 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wwqts" podStartSLOduration=3.5207985 podStartE2EDuration="7.08297386s" podCreationTimestamp="2025-11-21 16:42:52 +0000 UTC" firstStartedPulling="2025-11-21 16:42:55.001034254 +0000 UTC m=+4063.259555272" lastFinishedPulling="2025-11-21 16:42:58.563209614 +0000 UTC m=+4066.821730632" observedRunningTime="2025-11-21 16:42:59.075520516 +0000 UTC m=+4067.334041534" watchObservedRunningTime="2025-11-21 
16:42:59.08297386 +0000 UTC m=+4067.341494868" Nov 21 16:43:04 crc kubenswrapper[4967]: I1121 16:43:04.008251 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wwqts" Nov 21 16:43:04 crc kubenswrapper[4967]: I1121 16:43:04.008788 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wwqts" Nov 21 16:43:04 crc kubenswrapper[4967]: I1121 16:43:04.066517 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wwqts" Nov 21 16:43:04 crc kubenswrapper[4967]: I1121 16:43:04.167196 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wwqts" Nov 21 16:43:04 crc kubenswrapper[4967]: I1121 16:43:04.315157 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wwqts"] Nov 21 16:43:06 crc kubenswrapper[4967]: I1121 16:43:06.134744 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wwqts" podUID="0c132c5a-9b2c-4f49-897c-047885ee3c69" containerName="registry-server" containerID="cri-o://4d3deae780fa89854797b95212677558a469194d0af3afd9eeca7d63a49d980e" gracePeriod=2 Nov 21 16:43:06 crc kubenswrapper[4967]: I1121 16:43:06.645039 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wwqts" Nov 21 16:43:06 crc kubenswrapper[4967]: I1121 16:43:06.728763 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c132c5a-9b2c-4f49-897c-047885ee3c69-utilities\") pod \"0c132c5a-9b2c-4f49-897c-047885ee3c69\" (UID: \"0c132c5a-9b2c-4f49-897c-047885ee3c69\") " Nov 21 16:43:06 crc kubenswrapper[4967]: I1121 16:43:06.728866 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c132c5a-9b2c-4f49-897c-047885ee3c69-catalog-content\") pod \"0c132c5a-9b2c-4f49-897c-047885ee3c69\" (UID: \"0c132c5a-9b2c-4f49-897c-047885ee3c69\") " Nov 21 16:43:06 crc kubenswrapper[4967]: I1121 16:43:06.728961 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmf5x\" (UniqueName: \"kubernetes.io/projected/0c132c5a-9b2c-4f49-897c-047885ee3c69-kube-api-access-zmf5x\") pod \"0c132c5a-9b2c-4f49-897c-047885ee3c69\" (UID: \"0c132c5a-9b2c-4f49-897c-047885ee3c69\") " Nov 21 16:43:06 crc kubenswrapper[4967]: I1121 16:43:06.729636 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c132c5a-9b2c-4f49-897c-047885ee3c69-utilities" (OuterVolumeSpecName: "utilities") pod "0c132c5a-9b2c-4f49-897c-047885ee3c69" (UID: "0c132c5a-9b2c-4f49-897c-047885ee3c69"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:43:06 crc kubenswrapper[4967]: I1121 16:43:06.731147 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c132c5a-9b2c-4f49-897c-047885ee3c69-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 16:43:06 crc kubenswrapper[4967]: I1121 16:43:06.738378 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c132c5a-9b2c-4f49-897c-047885ee3c69-kube-api-access-zmf5x" (OuterVolumeSpecName: "kube-api-access-zmf5x") pod "0c132c5a-9b2c-4f49-897c-047885ee3c69" (UID: "0c132c5a-9b2c-4f49-897c-047885ee3c69"). InnerVolumeSpecName "kube-api-access-zmf5x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:43:06 crc kubenswrapper[4967]: I1121 16:43:06.775446 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c132c5a-9b2c-4f49-897c-047885ee3c69-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0c132c5a-9b2c-4f49-897c-047885ee3c69" (UID: "0c132c5a-9b2c-4f49-897c-047885ee3c69"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:43:06 crc kubenswrapper[4967]: I1121 16:43:06.833808 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zmf5x\" (UniqueName: \"kubernetes.io/projected/0c132c5a-9b2c-4f49-897c-047885ee3c69-kube-api-access-zmf5x\") on node \"crc\" DevicePath \"\"" Nov 21 16:43:06 crc kubenswrapper[4967]: I1121 16:43:06.833855 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c132c5a-9b2c-4f49-897c-047885ee3c69-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 16:43:07 crc kubenswrapper[4967]: I1121 16:43:07.148447 4967 generic.go:334] "Generic (PLEG): container finished" podID="0c132c5a-9b2c-4f49-897c-047885ee3c69" containerID="4d3deae780fa89854797b95212677558a469194d0af3afd9eeca7d63a49d980e" exitCode=0 Nov 21 16:43:07 crc kubenswrapper[4967]: I1121 16:43:07.148502 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wwqts" Nov 21 16:43:07 crc kubenswrapper[4967]: I1121 16:43:07.148513 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wwqts" event={"ID":"0c132c5a-9b2c-4f49-897c-047885ee3c69","Type":"ContainerDied","Data":"4d3deae780fa89854797b95212677558a469194d0af3afd9eeca7d63a49d980e"} Nov 21 16:43:07 crc kubenswrapper[4967]: I1121 16:43:07.148562 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wwqts" event={"ID":"0c132c5a-9b2c-4f49-897c-047885ee3c69","Type":"ContainerDied","Data":"9ec98242615b29e5e92aca4e782b316296c30db54cf50b677f4219558595a354"} Nov 21 16:43:07 crc kubenswrapper[4967]: I1121 16:43:07.148621 4967 scope.go:117] "RemoveContainer" containerID="4d3deae780fa89854797b95212677558a469194d0af3afd9eeca7d63a49d980e" Nov 21 16:43:07 crc kubenswrapper[4967]: I1121 16:43:07.181799 4967 scope.go:117] "RemoveContainer" containerID="57104df7a68f959aed4fbbf7586fc9a2f36ef28975272f194098e5fbe8c35030" Nov 21 16:43:07 crc kubenswrapper[4967]: I1121 16:43:07.188148 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wwqts"] Nov 21 16:43:07 crc kubenswrapper[4967]: I1121 16:43:07.198917 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wwqts"] Nov 21 16:43:07 crc kubenswrapper[4967]: I1121 16:43:07.205160 4967 scope.go:117] "RemoveContainer" containerID="8ad29a31ccf69e840247e5465b1fc3d412602c2fd885cc7e9e320a88ca19cb36" Nov 21 16:43:07 crc kubenswrapper[4967]: I1121 16:43:07.264169 4967 scope.go:117] "RemoveContainer" containerID="4d3deae780fa89854797b95212677558a469194d0af3afd9eeca7d63a49d980e" Nov 21 16:43:07 crc kubenswrapper[4967]: E1121 16:43:07.264723 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d3deae780fa89854797b95212677558a469194d0af3afd9eeca7d63a49d980e\": container with ID starting with 4d3deae780fa89854797b95212677558a469194d0af3afd9eeca7d63a49d980e not found: ID does not exist" containerID="4d3deae780fa89854797b95212677558a469194d0af3afd9eeca7d63a49d980e" Nov 21 16:43:07 crc kubenswrapper[4967]: I1121 16:43:07.264757 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d3deae780fa89854797b95212677558a469194d0af3afd9eeca7d63a49d980e"} err="failed to get container status \"4d3deae780fa89854797b95212677558a469194d0af3afd9eeca7d63a49d980e\": rpc error: code = NotFound desc = could not find container \"4d3deae780fa89854797b95212677558a469194d0af3afd9eeca7d63a49d980e\": container with ID starting with 4d3deae780fa89854797b95212677558a469194d0af3afd9eeca7d63a49d980e not found: ID does not exist" Nov 21 16:43:07 crc kubenswrapper[4967]: I1121 16:43:07.264786 4967 scope.go:117] "RemoveContainer" containerID="57104df7a68f959aed4fbbf7586fc9a2f36ef28975272f194098e5fbe8c35030" Nov 21 16:43:07 crc kubenswrapper[4967]: E1121 16:43:07.265126 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57104df7a68f959aed4fbbf7586fc9a2f36ef28975272f194098e5fbe8c35030\": container with ID starting with 57104df7a68f959aed4fbbf7586fc9a2f36ef28975272f194098e5fbe8c35030 not found: ID does not exist" containerID="57104df7a68f959aed4fbbf7586fc9a2f36ef28975272f194098e5fbe8c35030" Nov 21 16:43:07 crc kubenswrapper[4967]: I1121 16:43:07.265152 4967 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57104df7a68f959aed4fbbf7586fc9a2f36ef28975272f194098e5fbe8c35030"} err="failed to get container status \"57104df7a68f959aed4fbbf7586fc9a2f36ef28975272f194098e5fbe8c35030\": rpc error: code = NotFound desc = could not find container \"57104df7a68f959aed4fbbf7586fc9a2f36ef28975272f194098e5fbe8c35030\": container with ID starting with 57104df7a68f959aed4fbbf7586fc9a2f36ef28975272f194098e5fbe8c35030 not found: ID does not exist" Nov 21 16:43:07 crc kubenswrapper[4967]: I1121 16:43:07.265165 4967 scope.go:117] "RemoveContainer" containerID="8ad29a31ccf69e840247e5465b1fc3d412602c2fd885cc7e9e320a88ca19cb36" Nov 21 16:43:07 crc kubenswrapper[4967]: E1121 16:43:07.265488 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ad29a31ccf69e840247e5465b1fc3d412602c2fd885cc7e9e320a88ca19cb36\": container with ID starting with 8ad29a31ccf69e840247e5465b1fc3d412602c2fd885cc7e9e320a88ca19cb36 not found: ID does not exist" containerID="8ad29a31ccf69e840247e5465b1fc3d412602c2fd885cc7e9e320a88ca19cb36" Nov 21 16:43:07 crc kubenswrapper[4967]: I1121 16:43:07.265519 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ad29a31ccf69e840247e5465b1fc3d412602c2fd885cc7e9e320a88ca19cb36"} err="failed to get container status \"8ad29a31ccf69e840247e5465b1fc3d412602c2fd885cc7e9e320a88ca19cb36\": rpc error: code = NotFound desc = could not find container \"8ad29a31ccf69e840247e5465b1fc3d412602c2fd885cc7e9e320a88ca19cb36\": container with ID starting with 8ad29a31ccf69e840247e5465b1fc3d412602c2fd885cc7e9e320a88ca19cb36 not found: ID does not exist" Nov 21 16:43:08 crc kubenswrapper[4967]: I1121 16:43:08.560711 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c132c5a-9b2c-4f49-897c-047885ee3c69" path="/var/lib/kubelet/pods/0c132c5a-9b2c-4f49-897c-047885ee3c69/volumes" Nov 21 16:44:46 crc kubenswrapper[4967]: I1121 16:44:46.522556 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:44:46 crc kubenswrapper[4967]: I1121 16:44:46.523211 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:45:00 crc kubenswrapper[4967]: I1121 16:45:00.150055 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp"] Nov 21 16:45:00 crc kubenswrapper[4967]: E1121 16:45:00.151444 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c132c5a-9b2c-4f49-897c-047885ee3c69" containerName="extract-content" Nov 21 16:45:00 crc kubenswrapper[4967]: I1121 16:45:00.151464 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c132c5a-9b2c-4f49-897c-047885ee3c69" containerName="extract-content" Nov 21 16:45:00 crc kubenswrapper[4967]: E1121 16:45:00.151483 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c132c5a-9b2c-4f49-897c-047885ee3c69" containerName="registry-server" 
Nov 21 16:45:00 crc kubenswrapper[4967]: I1121 16:45:00.151492 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c132c5a-9b2c-4f49-897c-047885ee3c69" containerName="registry-server" Nov 21 16:45:00 crc kubenswrapper[4967]: E1121 16:45:00.151510 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c132c5a-9b2c-4f49-897c-047885ee3c69" containerName="extract-utilities" Nov 21 16:45:00 crc kubenswrapper[4967]: I1121 16:45:00.151519 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c132c5a-9b2c-4f49-897c-047885ee3c69" containerName="extract-utilities" Nov 21 16:45:00 crc kubenswrapper[4967]: I1121 16:45:00.151990 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c132c5a-9b2c-4f49-897c-047885ee3c69" containerName="registry-server" Nov 21 16:45:00 crc kubenswrapper[4967]: I1121 16:45:00.153294 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp" Nov 21 16:45:00 crc kubenswrapper[4967]: I1121 16:45:00.156424 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 16:45:00 crc kubenswrapper[4967]: I1121 16:45:00.156824 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 16:45:00 crc kubenswrapper[4967]: I1121 16:45:00.157506 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7gxr\" (UniqueName: \"kubernetes.io/projected/06225c23-8c36-469d-93c0-61f838f3afb1-kube-api-access-b7gxr\") pod \"collect-profiles-29395725-rw8dp\" (UID: \"06225c23-8c36-469d-93c0-61f838f3afb1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp" Nov 21 16:45:00 crc kubenswrapper[4967]: I1121 16:45:00.157668 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/06225c23-8c36-469d-93c0-61f838f3afb1-secret-volume\") pod \"collect-profiles-29395725-rw8dp\" (UID: \"06225c23-8c36-469d-93c0-61f838f3afb1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp" Nov 21 16:45:00 crc kubenswrapper[4967]: I1121 16:45:00.157713 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/06225c23-8c36-469d-93c0-61f838f3afb1-config-volume\") pod \"collect-profiles-29395725-rw8dp\" (UID: \"06225c23-8c36-469d-93c0-61f838f3afb1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp" Nov 21 16:45:00 crc kubenswrapper[4967]: I1121 16:45:00.163516 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp"] Nov 21 16:45:00 crc kubenswrapper[4967]: I1121 16:45:00.260542 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7gxr\" (UniqueName: \"kubernetes.io/projected/06225c23-8c36-469d-93c0-61f838f3afb1-kube-api-access-b7gxr\") pod \"collect-profiles-29395725-rw8dp\" (UID: \"06225c23-8c36-469d-93c0-61f838f3afb1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp" Nov 21 16:45:00 crc kubenswrapper[4967]: I1121 16:45:00.260752 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/06225c23-8c36-469d-93c0-61f838f3afb1-secret-volume\") pod \"collect-profiles-29395725-rw8dp\" (UID: \"06225c23-8c36-469d-93c0-61f838f3afb1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp" Nov 21 16:45:00 crc kubenswrapper[4967]: I1121 16:45:00.260799 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/06225c23-8c36-469d-93c0-61f838f3afb1-config-volume\") pod \"collect-profiles-29395725-rw8dp\" (UID: \"06225c23-8c36-469d-93c0-61f838f3afb1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp" Nov 21 16:45:00 crc kubenswrapper[4967]: I1121 16:45:00.262457 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/06225c23-8c36-469d-93c0-61f838f3afb1-config-volume\") pod \"collect-profiles-29395725-rw8dp\" (UID: \"06225c23-8c36-469d-93c0-61f838f3afb1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp" Nov 21 16:45:00 crc kubenswrapper[4967]: I1121 16:45:00.268437 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/06225c23-8c36-469d-93c0-61f838f3afb1-secret-volume\") pod \"collect-profiles-29395725-rw8dp\" (UID: \"06225c23-8c36-469d-93c0-61f838f3afb1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp" Nov 21 16:45:00 crc kubenswrapper[4967]: I1121 16:45:00.278190 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7gxr\" (UniqueName: \"kubernetes.io/projected/06225c23-8c36-469d-93c0-61f838f3afb1-kube-api-access-b7gxr\") pod \"collect-profiles-29395725-rw8dp\" (UID: \"06225c23-8c36-469d-93c0-61f838f3afb1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp" Nov 21 16:45:00 crc kubenswrapper[4967]: I1121 16:45:00.481825 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp" Nov 21 16:45:01 crc kubenswrapper[4967]: I1121 16:45:01.026866 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp"] Nov 21 16:45:01 crc kubenswrapper[4967]: I1121 16:45:01.404718 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp" event={"ID":"06225c23-8c36-469d-93c0-61f838f3afb1","Type":"ContainerStarted","Data":"d37d188eadefc9265d1ab36395fed01e7d9a39282cb8f886bbd8fbdff4398084"} Nov 21 16:45:01 crc kubenswrapper[4967]: I1121 16:45:01.405165 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp" event={"ID":"06225c23-8c36-469d-93c0-61f838f3afb1","Type":"ContainerStarted","Data":"6a8ef7aa631ac09fbec72f41a4c9f78a91229af6adb91b8e2f418e8325303337"} Nov 21 16:45:01 crc kubenswrapper[4967]: I1121 16:45:01.425105 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp" podStartSLOduration=1.425067198 podStartE2EDuration="1.425067198s" podCreationTimestamp="2025-11-21 16:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 16:45:01.418338806 +0000 UTC m=+4189.676859834" watchObservedRunningTime="2025-11-21 16:45:01.425067198 +0000 UTC m=+4189.683588216" Nov 21 16:45:02 crc kubenswrapper[4967]: I1121 16:45:02.419151 4967 generic.go:334] "Generic (PLEG): container finished" podID="06225c23-8c36-469d-93c0-61f838f3afb1" containerID="d37d188eadefc9265d1ab36395fed01e7d9a39282cb8f886bbd8fbdff4398084" exitCode=0 Nov 21 16:45:02 crc kubenswrapper[4967]: I1121 16:45:02.420461 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp" event={"ID":"06225c23-8c36-469d-93c0-61f838f3afb1","Type":"ContainerDied","Data":"d37d188eadefc9265d1ab36395fed01e7d9a39282cb8f886bbd8fbdff4398084"} Nov 21 16:45:03 crc kubenswrapper[4967]: I1121 16:45:03.841716 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp" Nov 21 16:45:03 crc kubenswrapper[4967]: I1121 16:45:03.847819 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b7gxr\" (UniqueName: \"kubernetes.io/projected/06225c23-8c36-469d-93c0-61f838f3afb1-kube-api-access-b7gxr\") pod \"06225c23-8c36-469d-93c0-61f838f3afb1\" (UID: \"06225c23-8c36-469d-93c0-61f838f3afb1\") " Nov 21 16:45:03 crc kubenswrapper[4967]: I1121 16:45:03.847982 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/06225c23-8c36-469d-93c0-61f838f3afb1-config-volume\") pod \"06225c23-8c36-469d-93c0-61f838f3afb1\" (UID: \"06225c23-8c36-469d-93c0-61f838f3afb1\") " Nov 21 16:45:03 crc kubenswrapper[4967]: I1121 16:45:03.848124 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/06225c23-8c36-469d-93c0-61f838f3afb1-secret-volume\") pod \"06225c23-8c36-469d-93c0-61f838f3afb1\" (UID: \"06225c23-8c36-469d-93c0-61f838f3afb1\") " Nov 21 16:45:03 crc kubenswrapper[4967]: I1121 16:45:03.850819 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06225c23-8c36-469d-93c0-61f838f3afb1-config-volume" (OuterVolumeSpecName: "config-volume") pod "06225c23-8c36-469d-93c0-61f838f3afb1" (UID: "06225c23-8c36-469d-93c0-61f838f3afb1"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 16:45:03 crc kubenswrapper[4967]: I1121 16:45:03.855426 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06225c23-8c36-469d-93c0-61f838f3afb1-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "06225c23-8c36-469d-93c0-61f838f3afb1" (UID: "06225c23-8c36-469d-93c0-61f838f3afb1"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 16:45:03 crc kubenswrapper[4967]: I1121 16:45:03.855574 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06225c23-8c36-469d-93c0-61f838f3afb1-kube-api-access-b7gxr" (OuterVolumeSpecName: "kube-api-access-b7gxr") pod "06225c23-8c36-469d-93c0-61f838f3afb1" (UID: "06225c23-8c36-469d-93c0-61f838f3afb1"). InnerVolumeSpecName "kube-api-access-b7gxr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:45:03 crc kubenswrapper[4967]: I1121 16:45:03.952664 4967 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/06225c23-8c36-469d-93c0-61f838f3afb1-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 16:45:03 crc kubenswrapper[4967]: I1121 16:45:03.952705 4967 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/06225c23-8c36-469d-93c0-61f838f3afb1-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 16:45:03 crc kubenswrapper[4967]: I1121 16:45:03.952719 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b7gxr\" (UniqueName: \"kubernetes.io/projected/06225c23-8c36-469d-93c0-61f838f3afb1-kube-api-access-b7gxr\") on node \"crc\" DevicePath \"\"" Nov 21 16:45:04 crc kubenswrapper[4967]: I1121 16:45:04.448904 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp" event={"ID":"06225c23-8c36-469d-93c0-61f838f3afb1","Type":"ContainerDied","Data":"6a8ef7aa631ac09fbec72f41a4c9f78a91229af6adb91b8e2f418e8325303337"} Nov 21 16:45:04 crc kubenswrapper[4967]: I1121 16:45:04.449210 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6a8ef7aa631ac09fbec72f41a4c9f78a91229af6adb91b8e2f418e8325303337" Nov 21 16:45:04 crc kubenswrapper[4967]: I1121 16:45:04.448979 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp" Nov 21 16:45:04 crc kubenswrapper[4967]: I1121 16:45:04.502346 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l"] Nov 21 16:45:04 crc kubenswrapper[4967]: I1121 16:45:04.513240 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395680-vhk6l"] Nov 21 16:45:04 crc kubenswrapper[4967]: I1121 16:45:04.553111 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="571c24e7-a9aa-4b5f-812e-be7b2ad9154a" path="/var/lib/kubelet/pods/571c24e7-a9aa-4b5f-812e-be7b2ad9154a/volumes" Nov 21 16:45:16 crc kubenswrapper[4967]: I1121 16:45:16.522392 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:45:16 crc kubenswrapper[4967]: I1121 16:45:16.522921 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:45:24 crc kubenswrapper[4967]: I1121 16:45:24.466154 4967 scope.go:117] "RemoveContainer" containerID="a6f304a1d098c6a9379f376126e217bedd1f07a3ebd8f27a03223f968385a270" Nov 21 16:45:46 crc kubenswrapper[4967]: I1121 16:45:46.522101 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Nov 21 16:45:46 crc kubenswrapper[4967]: I1121 16:45:46.522648 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:45:46 crc kubenswrapper[4967]: I1121 16:45:46.522696 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 16:45:46 crc kubenswrapper[4967]: I1121 16:45:46.523856 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ba972e3a53770a93442431b16b9ea59debb73599836dec99e6320827e6c01836"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 16:45:46 crc kubenswrapper[4967]: I1121 16:45:46.523926 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://ba972e3a53770a93442431b16b9ea59debb73599836dec99e6320827e6c01836" gracePeriod=600 Nov 21 16:45:46 crc kubenswrapper[4967]: I1121 16:45:46.916082 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="ba972e3a53770a93442431b16b9ea59debb73599836dec99e6320827e6c01836" exitCode=0 Nov 21 16:45:46 crc kubenswrapper[4967]: I1121 16:45:46.916123 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"ba972e3a53770a93442431b16b9ea59debb73599836dec99e6320827e6c01836"} Nov 21 16:45:46 crc kubenswrapper[4967]: I1121 16:45:46.916464 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52"} Nov 21 16:45:46 crc kubenswrapper[4967]: I1121 16:45:46.916490 4967 scope.go:117] "RemoveContainer" containerID="bcb6bfadc48ffd5beadf58d0563085071be224b1dedcdec7470015a0f1865903" Nov 21 16:47:46 crc kubenswrapper[4967]: I1121 16:47:46.522607 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:47:46 crc kubenswrapper[4967]: I1121 16:47:46.523142 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:48:16 crc kubenswrapper[4967]: I1121 16:48:16.522893 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:48:16 crc kubenswrapper[4967]: I1121 16:48:16.523961 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:48:46 crc kubenswrapper[4967]: I1121 16:48:46.522024 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:48:46 crc kubenswrapper[4967]: I1121 16:48:46.522703 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:48:46 crc kubenswrapper[4967]: I1121 16:48:46.524033 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 16:48:46 crc kubenswrapper[4967]: I1121 16:48:46.524994 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 16:48:46 crc kubenswrapper[4967]: I1121 16:48:46.525165 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" gracePeriod=600 Nov 21 16:48:46 crc kubenswrapper[4967]: I1121 16:48:46.958473 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" exitCode=0 Nov 21 16:48:46 crc kubenswrapper[4967]: I1121 16:48:46.958841 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52"} Nov 21 16:48:46 crc kubenswrapper[4967]: I1121 16:48:46.958884 4967 scope.go:117] "RemoveContainer" containerID="ba972e3a53770a93442431b16b9ea59debb73599836dec99e6320827e6c01836" Nov 21 16:48:47 crc kubenswrapper[4967]: E1121 16:48:47.262294 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 
16:48:47 crc kubenswrapper[4967]: I1121 16:48:47.974093 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:48:47 crc kubenswrapper[4967]: E1121 16:48:47.974814 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:49:03 crc kubenswrapper[4967]: I1121 16:49:03.536671 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:49:03 crc kubenswrapper[4967]: E1121 16:49:03.537438 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:49:18 crc kubenswrapper[4967]: I1121 16:49:18.536014 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:49:18 crc kubenswrapper[4967]: E1121 16:49:18.536842 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:49:32 crc kubenswrapper[4967]: I1121 16:49:32.544817 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:49:32 crc kubenswrapper[4967]: E1121 16:49:32.545650 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:49:34 crc kubenswrapper[4967]: I1121 16:49:34.149052 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lqths"] Nov 21 16:49:34 crc kubenswrapper[4967]: E1121 16:49:34.150247 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06225c23-8c36-469d-93c0-61f838f3afb1" containerName="collect-profiles" Nov 21 16:49:34 crc kubenswrapper[4967]: I1121 16:49:34.150265 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="06225c23-8c36-469d-93c0-61f838f3afb1" containerName="collect-profiles" Nov 21 16:49:34 crc kubenswrapper[4967]: I1121 16:49:34.152477 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="06225c23-8c36-469d-93c0-61f838f3afb1" containerName="collect-profiles" Nov 21 16:49:34 crc kubenswrapper[4967]: I1121 16:49:34.154621 4967 util.go:30] "No sandbox for pod 
can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lqths" Nov 21 16:49:34 crc kubenswrapper[4967]: I1121 16:49:34.168660 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lqths"] Nov 21 16:49:34 crc kubenswrapper[4967]: I1121 16:49:34.326667 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc658808-a977-460d-833e-bdcf52c18e7c-catalog-content\") pod \"redhat-marketplace-lqths\" (UID: \"cc658808-a977-460d-833e-bdcf52c18e7c\") " pod="openshift-marketplace/redhat-marketplace-lqths" Nov 21 16:49:34 crc kubenswrapper[4967]: I1121 16:49:34.327012 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc658808-a977-460d-833e-bdcf52c18e7c-utilities\") pod \"redhat-marketplace-lqths\" (UID: \"cc658808-a977-460d-833e-bdcf52c18e7c\") " pod="openshift-marketplace/redhat-marketplace-lqths" Nov 21 16:49:34 crc kubenswrapper[4967]: I1121 16:49:34.327245 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wj4l8\" (UniqueName: \"kubernetes.io/projected/cc658808-a977-460d-833e-bdcf52c18e7c-kube-api-access-wj4l8\") pod \"redhat-marketplace-lqths\" (UID: \"cc658808-a977-460d-833e-bdcf52c18e7c\") " pod="openshift-marketplace/redhat-marketplace-lqths" Nov 21 16:49:34 crc kubenswrapper[4967]: I1121 16:49:34.429768 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc658808-a977-460d-833e-bdcf52c18e7c-catalog-content\") pod \"redhat-marketplace-lqths\" (UID: \"cc658808-a977-460d-833e-bdcf52c18e7c\") " pod="openshift-marketplace/redhat-marketplace-lqths" Nov 21 16:49:34 crc kubenswrapper[4967]: I1121 16:49:34.430177 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc658808-a977-460d-833e-bdcf52c18e7c-utilities\") pod \"redhat-marketplace-lqths\" (UID: \"cc658808-a977-460d-833e-bdcf52c18e7c\") " pod="openshift-marketplace/redhat-marketplace-lqths" Nov 21 16:49:34 crc kubenswrapper[4967]: I1121 16:49:34.430268 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc658808-a977-460d-833e-bdcf52c18e7c-catalog-content\") pod \"redhat-marketplace-lqths\" (UID: \"cc658808-a977-460d-833e-bdcf52c18e7c\") " pod="openshift-marketplace/redhat-marketplace-lqths" Nov 21 16:49:34 crc kubenswrapper[4967]: I1121 16:49:34.430474 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wj4l8\" (UniqueName: \"kubernetes.io/projected/cc658808-a977-460d-833e-bdcf52c18e7c-kube-api-access-wj4l8\") pod \"redhat-marketplace-lqths\" (UID: \"cc658808-a977-460d-833e-bdcf52c18e7c\") " pod="openshift-marketplace/redhat-marketplace-lqths" Nov 21 16:49:34 crc kubenswrapper[4967]: I1121 16:49:34.430567 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc658808-a977-460d-833e-bdcf52c18e7c-utilities\") pod \"redhat-marketplace-lqths\" (UID: \"cc658808-a977-460d-833e-bdcf52c18e7c\") " pod="openshift-marketplace/redhat-marketplace-lqths" Nov 21 16:49:34 crc kubenswrapper[4967]: I1121 16:49:34.810687 4967 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-wj4l8\" (UniqueName: \"kubernetes.io/projected/cc658808-a977-460d-833e-bdcf52c18e7c-kube-api-access-wj4l8\") pod \"redhat-marketplace-lqths\" (UID: \"cc658808-a977-460d-833e-bdcf52c18e7c\") " pod="openshift-marketplace/redhat-marketplace-lqths" Nov 21 16:49:35 crc kubenswrapper[4967]: I1121 16:49:35.081929 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lqths" Nov 21 16:49:35 crc kubenswrapper[4967]: I1121 16:49:35.553104 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lqths"] Nov 21 16:49:36 crc kubenswrapper[4967]: I1121 16:49:36.516750 4967 generic.go:334] "Generic (PLEG): container finished" podID="cc658808-a977-460d-833e-bdcf52c18e7c" containerID="513bab4fe693c894032756ae3200f0394b1b08df2991b14227010c80ec1667ae" exitCode=0 Nov 21 16:49:36 crc kubenswrapper[4967]: I1121 16:49:36.516816 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lqths" event={"ID":"cc658808-a977-460d-833e-bdcf52c18e7c","Type":"ContainerDied","Data":"513bab4fe693c894032756ae3200f0394b1b08df2991b14227010c80ec1667ae"} Nov 21 16:49:36 crc kubenswrapper[4967]: I1121 16:49:36.517295 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lqths" event={"ID":"cc658808-a977-460d-833e-bdcf52c18e7c","Type":"ContainerStarted","Data":"613ec5a1d5bb3e307642da1c8d1069c4bccdcf9e0714824cba69b2d23b0b7f29"} Nov 21 16:49:36 crc kubenswrapper[4967]: I1121 16:49:36.519383 4967 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 16:49:37 crc kubenswrapper[4967]: I1121 16:49:37.529852 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lqths" event={"ID":"cc658808-a977-460d-833e-bdcf52c18e7c","Type":"ContainerStarted","Data":"369b8710e989641eaf05f77a93d4c980def6a92c81c92725b0a69c7b1faf45e5"} Nov 21 16:49:38 crc kubenswrapper[4967]: I1121 16:49:38.553603 4967 generic.go:334] "Generic (PLEG): container finished" podID="cc658808-a977-460d-833e-bdcf52c18e7c" containerID="369b8710e989641eaf05f77a93d4c980def6a92c81c92725b0a69c7b1faf45e5" exitCode=0 Nov 21 16:49:38 crc kubenswrapper[4967]: I1121 16:49:38.557646 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lqths" event={"ID":"cc658808-a977-460d-833e-bdcf52c18e7c","Type":"ContainerDied","Data":"369b8710e989641eaf05f77a93d4c980def6a92c81c92725b0a69c7b1faf45e5"} Nov 21 16:49:39 crc kubenswrapper[4967]: I1121 16:49:39.567549 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lqths" event={"ID":"cc658808-a977-460d-833e-bdcf52c18e7c","Type":"ContainerStarted","Data":"5000f055955f478fdbc883ee7439438c06a2bf6d05d2b3daabcdf29494f96c42"} Nov 21 16:49:39 crc kubenswrapper[4967]: I1121 16:49:39.593910 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lqths" podStartSLOduration=3.135940818 podStartE2EDuration="5.593891374s" podCreationTimestamp="2025-11-21 16:49:34 +0000 UTC" firstStartedPulling="2025-11-21 16:49:36.519122421 +0000 UTC m=+4464.777643429" lastFinishedPulling="2025-11-21 16:49:38.977072977 +0000 UTC m=+4467.235593985" observedRunningTime="2025-11-21 16:49:39.583965039 +0000 UTC m=+4467.842486057" watchObservedRunningTime="2025-11-21 
16:49:39.593891374 +0000 UTC m=+4467.852412382" Nov 21 16:49:40 crc kubenswrapper[4967]: I1121 16:49:40.122999 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zjpgt"] Nov 21 16:49:40 crc kubenswrapper[4967]: I1121 16:49:40.126206 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zjpgt" Nov 21 16:49:40 crc kubenswrapper[4967]: I1121 16:49:40.155278 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zjpgt"] Nov 21 16:49:40 crc kubenswrapper[4967]: I1121 16:49:40.319673 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zv2l6\" (UniqueName: \"kubernetes.io/projected/059536a7-4148-47e1-a44d-acd83f8acd7b-kube-api-access-zv2l6\") pod \"redhat-operators-zjpgt\" (UID: \"059536a7-4148-47e1-a44d-acd83f8acd7b\") " pod="openshift-marketplace/redhat-operators-zjpgt" Nov 21 16:49:40 crc kubenswrapper[4967]: I1121 16:49:40.319861 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/059536a7-4148-47e1-a44d-acd83f8acd7b-utilities\") pod \"redhat-operators-zjpgt\" (UID: \"059536a7-4148-47e1-a44d-acd83f8acd7b\") " pod="openshift-marketplace/redhat-operators-zjpgt" Nov 21 16:49:40 crc kubenswrapper[4967]: I1121 16:49:40.320051 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/059536a7-4148-47e1-a44d-acd83f8acd7b-catalog-content\") pod \"redhat-operators-zjpgt\" (UID: \"059536a7-4148-47e1-a44d-acd83f8acd7b\") " pod="openshift-marketplace/redhat-operators-zjpgt" Nov 21 16:49:40 crc kubenswrapper[4967]: I1121 16:49:40.422483 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/059536a7-4148-47e1-a44d-acd83f8acd7b-utilities\") pod \"redhat-operators-zjpgt\" (UID: \"059536a7-4148-47e1-a44d-acd83f8acd7b\") " pod="openshift-marketplace/redhat-operators-zjpgt" Nov 21 16:49:40 crc kubenswrapper[4967]: I1121 16:49:40.422629 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/059536a7-4148-47e1-a44d-acd83f8acd7b-catalog-content\") pod \"redhat-operators-zjpgt\" (UID: \"059536a7-4148-47e1-a44d-acd83f8acd7b\") " pod="openshift-marketplace/redhat-operators-zjpgt" Nov 21 16:49:40 crc kubenswrapper[4967]: I1121 16:49:40.422676 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zv2l6\" (UniqueName: \"kubernetes.io/projected/059536a7-4148-47e1-a44d-acd83f8acd7b-kube-api-access-zv2l6\") pod \"redhat-operators-zjpgt\" (UID: \"059536a7-4148-47e1-a44d-acd83f8acd7b\") " pod="openshift-marketplace/redhat-operators-zjpgt" Nov 21 16:49:40 crc kubenswrapper[4967]: I1121 16:49:40.423035 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/059536a7-4148-47e1-a44d-acd83f8acd7b-catalog-content\") pod \"redhat-operators-zjpgt\" (UID: \"059536a7-4148-47e1-a44d-acd83f8acd7b\") " pod="openshift-marketplace/redhat-operators-zjpgt" Nov 21 16:49:40 crc kubenswrapper[4967]: I1121 16:49:40.423035 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/059536a7-4148-47e1-a44d-acd83f8acd7b-utilities\") pod \"redhat-operators-zjpgt\" (UID: \"059536a7-4148-47e1-a44d-acd83f8acd7b\") " pod="openshift-marketplace/redhat-operators-zjpgt" Nov 21 16:49:40 crc kubenswrapper[4967]: I1121 16:49:40.441048 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zv2l6\" (UniqueName: \"kubernetes.io/projected/059536a7-4148-47e1-a44d-acd83f8acd7b-kube-api-access-zv2l6\") pod \"redhat-operators-zjpgt\" (UID: \"059536a7-4148-47e1-a44d-acd83f8acd7b\") " pod="openshift-marketplace/redhat-operators-zjpgt" Nov 21 16:49:40 crc kubenswrapper[4967]: I1121 16:49:40.454813 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zjpgt" Nov 21 16:49:40 crc kubenswrapper[4967]: I1121 16:49:40.958903 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zjpgt"] Nov 21 16:49:41 crc kubenswrapper[4967]: I1121 16:49:41.589934 4967 generic.go:334] "Generic (PLEG): container finished" podID="059536a7-4148-47e1-a44d-acd83f8acd7b" containerID="e54ba7a28ef78342013894b87c58624becca4db40ef681dca95f6003d9051a0e" exitCode=0 Nov 21 16:49:41 crc kubenswrapper[4967]: I1121 16:49:41.590223 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zjpgt" event={"ID":"059536a7-4148-47e1-a44d-acd83f8acd7b","Type":"ContainerDied","Data":"e54ba7a28ef78342013894b87c58624becca4db40ef681dca95f6003d9051a0e"} Nov 21 16:49:41 crc kubenswrapper[4967]: I1121 16:49:41.590251 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zjpgt" event={"ID":"059536a7-4148-47e1-a44d-acd83f8acd7b","Type":"ContainerStarted","Data":"63f7917985a34457bd53f11fd110b57a2889b191ab38f54bb4760515f1fb8670"} Nov 21 16:49:43 crc kubenswrapper[4967]: I1121 16:49:43.615364 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zjpgt" event={"ID":"059536a7-4148-47e1-a44d-acd83f8acd7b","Type":"ContainerStarted","Data":"c000e303c48bb3b1c8d40b72339cf9cdd6f6093e844f71742908d08ff09f092e"} Nov 21 16:49:45 crc kubenswrapper[4967]: I1121 16:49:45.082577 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lqths" Nov 21 16:49:45 crc kubenswrapper[4967]: I1121 16:49:45.082942 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lqths" Nov 21 16:49:45 crc kubenswrapper[4967]: I1121 16:49:45.132335 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lqths" Nov 21 16:49:45 crc kubenswrapper[4967]: I1121 16:49:45.536167 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:49:45 crc kubenswrapper[4967]: E1121 16:49:45.536751 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:49:45 crc kubenswrapper[4967]: I1121 16:49:45.688953 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/redhat-marketplace-lqths" Nov 21 16:49:47 crc kubenswrapper[4967]: I1121 16:49:47.661845 4967 generic.go:334] "Generic (PLEG): container finished" podID="059536a7-4148-47e1-a44d-acd83f8acd7b" containerID="c000e303c48bb3b1c8d40b72339cf9cdd6f6093e844f71742908d08ff09f092e" exitCode=0 Nov 21 16:49:47 crc kubenswrapper[4967]: I1121 16:49:47.661949 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zjpgt" event={"ID":"059536a7-4148-47e1-a44d-acd83f8acd7b","Type":"ContainerDied","Data":"c000e303c48bb3b1c8d40b72339cf9cdd6f6093e844f71742908d08ff09f092e"} Nov 21 16:49:47 crc kubenswrapper[4967]: I1121 16:49:47.920369 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lqths"] Nov 21 16:49:47 crc kubenswrapper[4967]: I1121 16:49:47.921171 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lqths" podUID="cc658808-a977-460d-833e-bdcf52c18e7c" containerName="registry-server" containerID="cri-o://5000f055955f478fdbc883ee7439438c06a2bf6d05d2b3daabcdf29494f96c42" gracePeriod=2 Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.514748 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lqths" Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.545565 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc658808-a977-460d-833e-bdcf52c18e7c-utilities\") pod \"cc658808-a977-460d-833e-bdcf52c18e7c\" (UID: \"cc658808-a977-460d-833e-bdcf52c18e7c\") " Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.545658 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc658808-a977-460d-833e-bdcf52c18e7c-catalog-content\") pod \"cc658808-a977-460d-833e-bdcf52c18e7c\" (UID: \"cc658808-a977-460d-833e-bdcf52c18e7c\") " Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.545734 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wj4l8\" (UniqueName: \"kubernetes.io/projected/cc658808-a977-460d-833e-bdcf52c18e7c-kube-api-access-wj4l8\") pod \"cc658808-a977-460d-833e-bdcf52c18e7c\" (UID: \"cc658808-a977-460d-833e-bdcf52c18e7c\") " Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.546305 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc658808-a977-460d-833e-bdcf52c18e7c-utilities" (OuterVolumeSpecName: "utilities") pod "cc658808-a977-460d-833e-bdcf52c18e7c" (UID: "cc658808-a977-460d-833e-bdcf52c18e7c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.558238 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc658808-a977-460d-833e-bdcf52c18e7c-kube-api-access-wj4l8" (OuterVolumeSpecName: "kube-api-access-wj4l8") pod "cc658808-a977-460d-833e-bdcf52c18e7c" (UID: "cc658808-a977-460d-833e-bdcf52c18e7c"). InnerVolumeSpecName "kube-api-access-wj4l8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.568968 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc658808-a977-460d-833e-bdcf52c18e7c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cc658808-a977-460d-833e-bdcf52c18e7c" (UID: "cc658808-a977-460d-833e-bdcf52c18e7c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.648803 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc658808-a977-460d-833e-bdcf52c18e7c-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.648841 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc658808-a977-460d-833e-bdcf52c18e7c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.648850 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wj4l8\" (UniqueName: \"kubernetes.io/projected/cc658808-a977-460d-833e-bdcf52c18e7c-kube-api-access-wj4l8\") on node \"crc\" DevicePath \"\"" Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.675713 4967 generic.go:334] "Generic (PLEG): container finished" podID="cc658808-a977-460d-833e-bdcf52c18e7c" containerID="5000f055955f478fdbc883ee7439438c06a2bf6d05d2b3daabcdf29494f96c42" exitCode=0 Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.675772 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lqths" event={"ID":"cc658808-a977-460d-833e-bdcf52c18e7c","Type":"ContainerDied","Data":"5000f055955f478fdbc883ee7439438c06a2bf6d05d2b3daabcdf29494f96c42"} Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.675822 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lqths" Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.675849 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lqths" event={"ID":"cc658808-a977-460d-833e-bdcf52c18e7c","Type":"ContainerDied","Data":"613ec5a1d5bb3e307642da1c8d1069c4bccdcf9e0714824cba69b2d23b0b7f29"} Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.675875 4967 scope.go:117] "RemoveContainer" containerID="5000f055955f478fdbc883ee7439438c06a2bf6d05d2b3daabcdf29494f96c42" Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.679766 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zjpgt" event={"ID":"059536a7-4148-47e1-a44d-acd83f8acd7b","Type":"ContainerStarted","Data":"47edb634cbc566153864217b860cfa9ebe24fb05d18ef8414ef533f8c3a229e4"} Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.695691 4967 scope.go:117] "RemoveContainer" containerID="369b8710e989641eaf05f77a93d4c980def6a92c81c92725b0a69c7b1faf45e5" Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.713103 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zjpgt" podStartSLOduration=2.230860627 podStartE2EDuration="8.713077336s" podCreationTimestamp="2025-11-21 16:49:40 +0000 UTC" firstStartedPulling="2025-11-21 16:49:41.591737962 +0000 UTC m=+4469.850258970" lastFinishedPulling="2025-11-21 16:49:48.073954671 +0000 UTC m=+4476.332475679" observedRunningTime="2025-11-21 16:49:48.700150996 +0000 UTC m=+4476.958672024" watchObservedRunningTime="2025-11-21 16:49:48.713077336 +0000 UTC m=+4476.971598334" Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.727839 4967 scope.go:117] "RemoveContainer" containerID="513bab4fe693c894032756ae3200f0394b1b08df2991b14227010c80ec1667ae" Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.729013 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lqths"] Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.738018 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lqths"] Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.760601 4967 scope.go:117] "RemoveContainer" containerID="5000f055955f478fdbc883ee7439438c06a2bf6d05d2b3daabcdf29494f96c42" Nov 21 16:49:48 crc kubenswrapper[4967]: E1121 16:49:48.761352 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5000f055955f478fdbc883ee7439438c06a2bf6d05d2b3daabcdf29494f96c42\": container with ID starting with 5000f055955f478fdbc883ee7439438c06a2bf6d05d2b3daabcdf29494f96c42 not found: ID does not exist" containerID="5000f055955f478fdbc883ee7439438c06a2bf6d05d2b3daabcdf29494f96c42" Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.761402 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5000f055955f478fdbc883ee7439438c06a2bf6d05d2b3daabcdf29494f96c42"} err="failed to get container status \"5000f055955f478fdbc883ee7439438c06a2bf6d05d2b3daabcdf29494f96c42\": rpc error: code = NotFound desc = could not find container \"5000f055955f478fdbc883ee7439438c06a2bf6d05d2b3daabcdf29494f96c42\": container with ID starting with 5000f055955f478fdbc883ee7439438c06a2bf6d05d2b3daabcdf29494f96c42 not found: ID does not exist" Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.761438 4967 scope.go:117] 
"RemoveContainer" containerID="369b8710e989641eaf05f77a93d4c980def6a92c81c92725b0a69c7b1faf45e5" Nov 21 16:49:48 crc kubenswrapper[4967]: E1121 16:49:48.762090 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"369b8710e989641eaf05f77a93d4c980def6a92c81c92725b0a69c7b1faf45e5\": container with ID starting with 369b8710e989641eaf05f77a93d4c980def6a92c81c92725b0a69c7b1faf45e5 not found: ID does not exist" containerID="369b8710e989641eaf05f77a93d4c980def6a92c81c92725b0a69c7b1faf45e5" Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.762133 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"369b8710e989641eaf05f77a93d4c980def6a92c81c92725b0a69c7b1faf45e5"} err="failed to get container status \"369b8710e989641eaf05f77a93d4c980def6a92c81c92725b0a69c7b1faf45e5\": rpc error: code = NotFound desc = could not find container \"369b8710e989641eaf05f77a93d4c980def6a92c81c92725b0a69c7b1faf45e5\": container with ID starting with 369b8710e989641eaf05f77a93d4c980def6a92c81c92725b0a69c7b1faf45e5 not found: ID does not exist" Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.762161 4967 scope.go:117] "RemoveContainer" containerID="513bab4fe693c894032756ae3200f0394b1b08df2991b14227010c80ec1667ae" Nov 21 16:49:48 crc kubenswrapper[4967]: E1121 16:49:48.762522 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"513bab4fe693c894032756ae3200f0394b1b08df2991b14227010c80ec1667ae\": container with ID starting with 513bab4fe693c894032756ae3200f0394b1b08df2991b14227010c80ec1667ae not found: ID does not exist" containerID="513bab4fe693c894032756ae3200f0394b1b08df2991b14227010c80ec1667ae" Nov 21 16:49:48 crc kubenswrapper[4967]: I1121 16:49:48.762579 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"513bab4fe693c894032756ae3200f0394b1b08df2991b14227010c80ec1667ae"} err="failed to get container status \"513bab4fe693c894032756ae3200f0394b1b08df2991b14227010c80ec1667ae\": rpc error: code = NotFound desc = could not find container \"513bab4fe693c894032756ae3200f0394b1b08df2991b14227010c80ec1667ae\": container with ID starting with 513bab4fe693c894032756ae3200f0394b1b08df2991b14227010c80ec1667ae not found: ID does not exist" Nov 21 16:49:50 crc kubenswrapper[4967]: I1121 16:49:50.455750 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zjpgt" Nov 21 16:49:50 crc kubenswrapper[4967]: I1121 16:49:50.456467 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zjpgt" Nov 21 16:49:50 crc kubenswrapper[4967]: I1121 16:49:50.555792 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc658808-a977-460d-833e-bdcf52c18e7c" path="/var/lib/kubelet/pods/cc658808-a977-460d-833e-bdcf52c18e7c/volumes" Nov 21 16:49:51 crc kubenswrapper[4967]: I1121 16:49:51.512993 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zjpgt" podUID="059536a7-4148-47e1-a44d-acd83f8acd7b" containerName="registry-server" probeResult="failure" output=< Nov 21 16:49:51 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 16:49:51 crc kubenswrapper[4967]: > Nov 21 16:49:59 crc kubenswrapper[4967]: I1121 16:49:59.536438 4967 scope.go:117] "RemoveContainer" 
containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:49:59 crc kubenswrapper[4967]: E1121 16:49:59.537495 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:50:00 crc kubenswrapper[4967]: I1121 16:50:00.505433 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zjpgt" Nov 21 16:50:00 crc kubenswrapper[4967]: I1121 16:50:00.561590 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zjpgt" Nov 21 16:50:01 crc kubenswrapper[4967]: I1121 16:50:01.518345 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zjpgt"] Nov 21 16:50:01 crc kubenswrapper[4967]: I1121 16:50:01.816961 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zjpgt" podUID="059536a7-4148-47e1-a44d-acd83f8acd7b" containerName="registry-server" containerID="cri-o://47edb634cbc566153864217b860cfa9ebe24fb05d18ef8414ef533f8c3a229e4" gracePeriod=2 Nov 21 16:50:02 crc kubenswrapper[4967]: I1121 16:50:02.849384 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zjpgt" Nov 21 16:50:02 crc kubenswrapper[4967]: I1121 16:50:02.866645 4967 generic.go:334] "Generic (PLEG): container finished" podID="059536a7-4148-47e1-a44d-acd83f8acd7b" containerID="47edb634cbc566153864217b860cfa9ebe24fb05d18ef8414ef533f8c3a229e4" exitCode=0 Nov 21 16:50:02 crc kubenswrapper[4967]: I1121 16:50:02.866696 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zjpgt" event={"ID":"059536a7-4148-47e1-a44d-acd83f8acd7b","Type":"ContainerDied","Data":"47edb634cbc566153864217b860cfa9ebe24fb05d18ef8414ef533f8c3a229e4"} Nov 21 16:50:02 crc kubenswrapper[4967]: I1121 16:50:02.866727 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zjpgt" event={"ID":"059536a7-4148-47e1-a44d-acd83f8acd7b","Type":"ContainerDied","Data":"63f7917985a34457bd53f11fd110b57a2889b191ab38f54bb4760515f1fb8670"} Nov 21 16:50:02 crc kubenswrapper[4967]: I1121 16:50:02.866746 4967 scope.go:117] "RemoveContainer" containerID="47edb634cbc566153864217b860cfa9ebe24fb05d18ef8414ef533f8c3a229e4" Nov 21 16:50:02 crc kubenswrapper[4967]: I1121 16:50:02.866779 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zjpgt" Nov 21 16:50:02 crc kubenswrapper[4967]: I1121 16:50:02.907123 4967 scope.go:117] "RemoveContainer" containerID="c000e303c48bb3b1c8d40b72339cf9cdd6f6093e844f71742908d08ff09f092e" Nov 21 16:50:02 crc kubenswrapper[4967]: I1121 16:50:02.939983 4967 scope.go:117] "RemoveContainer" containerID="e54ba7a28ef78342013894b87c58624becca4db40ef681dca95f6003d9051a0e" Nov 21 16:50:02 crc kubenswrapper[4967]: I1121 16:50:02.942126 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/059536a7-4148-47e1-a44d-acd83f8acd7b-catalog-content\") pod \"059536a7-4148-47e1-a44d-acd83f8acd7b\" (UID: \"059536a7-4148-47e1-a44d-acd83f8acd7b\") " Nov 21 16:50:02 crc kubenswrapper[4967]: I1121 16:50:02.942281 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zv2l6\" (UniqueName: \"kubernetes.io/projected/059536a7-4148-47e1-a44d-acd83f8acd7b-kube-api-access-zv2l6\") pod \"059536a7-4148-47e1-a44d-acd83f8acd7b\" (UID: \"059536a7-4148-47e1-a44d-acd83f8acd7b\") " Nov 21 16:50:02 crc kubenswrapper[4967]: I1121 16:50:02.942585 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/059536a7-4148-47e1-a44d-acd83f8acd7b-utilities\") pod \"059536a7-4148-47e1-a44d-acd83f8acd7b\" (UID: \"059536a7-4148-47e1-a44d-acd83f8acd7b\") " Nov 21 16:50:02 crc kubenswrapper[4967]: I1121 16:50:02.943658 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/059536a7-4148-47e1-a44d-acd83f8acd7b-utilities" (OuterVolumeSpecName: "utilities") pod "059536a7-4148-47e1-a44d-acd83f8acd7b" (UID: "059536a7-4148-47e1-a44d-acd83f8acd7b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:50:02 crc kubenswrapper[4967]: I1121 16:50:02.949458 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/059536a7-4148-47e1-a44d-acd83f8acd7b-kube-api-access-zv2l6" (OuterVolumeSpecName: "kube-api-access-zv2l6") pod "059536a7-4148-47e1-a44d-acd83f8acd7b" (UID: "059536a7-4148-47e1-a44d-acd83f8acd7b"). InnerVolumeSpecName "kube-api-access-zv2l6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 16:50:03 crc kubenswrapper[4967]: I1121 16:50:03.043669 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/059536a7-4148-47e1-a44d-acd83f8acd7b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "059536a7-4148-47e1-a44d-acd83f8acd7b" (UID: "059536a7-4148-47e1-a44d-acd83f8acd7b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:50:03 crc kubenswrapper[4967]: I1121 16:50:03.044379 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/059536a7-4148-47e1-a44d-acd83f8acd7b-catalog-content\") pod \"059536a7-4148-47e1-a44d-acd83f8acd7b\" (UID: \"059536a7-4148-47e1-a44d-acd83f8acd7b\") " Nov 21 16:50:03 crc kubenswrapper[4967]: W1121 16:50:03.046556 4967 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/059536a7-4148-47e1-a44d-acd83f8acd7b/volumes/kubernetes.io~empty-dir/catalog-content Nov 21 16:50:03 crc kubenswrapper[4967]: I1121 16:50:03.046595 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/059536a7-4148-47e1-a44d-acd83f8acd7b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "059536a7-4148-47e1-a44d-acd83f8acd7b" (UID: "059536a7-4148-47e1-a44d-acd83f8acd7b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 16:50:03 crc kubenswrapper[4967]: I1121 16:50:03.047041 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/059536a7-4148-47e1-a44d-acd83f8acd7b-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 16:50:03 crc kubenswrapper[4967]: I1121 16:50:03.047070 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/059536a7-4148-47e1-a44d-acd83f8acd7b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 16:50:03 crc kubenswrapper[4967]: I1121 16:50:03.047086 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zv2l6\" (UniqueName: \"kubernetes.io/projected/059536a7-4148-47e1-a44d-acd83f8acd7b-kube-api-access-zv2l6\") on node \"crc\" DevicePath \"\"" Nov 21 16:50:03 crc kubenswrapper[4967]: I1121 16:50:03.061406 4967 scope.go:117] "RemoveContainer" containerID="47edb634cbc566153864217b860cfa9ebe24fb05d18ef8414ef533f8c3a229e4" Nov 21 16:50:03 crc kubenswrapper[4967]: E1121 16:50:03.062482 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47edb634cbc566153864217b860cfa9ebe24fb05d18ef8414ef533f8c3a229e4\": container with ID starting with 47edb634cbc566153864217b860cfa9ebe24fb05d18ef8414ef533f8c3a229e4 not found: ID does not exist" containerID="47edb634cbc566153864217b860cfa9ebe24fb05d18ef8414ef533f8c3a229e4" Nov 21 16:50:03 crc kubenswrapper[4967]: I1121 16:50:03.062541 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47edb634cbc566153864217b860cfa9ebe24fb05d18ef8414ef533f8c3a229e4"} err="failed to get container status \"47edb634cbc566153864217b860cfa9ebe24fb05d18ef8414ef533f8c3a229e4\": rpc error: code = NotFound desc = could not find container \"47edb634cbc566153864217b860cfa9ebe24fb05d18ef8414ef533f8c3a229e4\": container with ID starting with 47edb634cbc566153864217b860cfa9ebe24fb05d18ef8414ef533f8c3a229e4 not found: ID does not exist" Nov 21 16:50:03 crc kubenswrapper[4967]: I1121 16:50:03.062577 4967 scope.go:117] "RemoveContainer" containerID="c000e303c48bb3b1c8d40b72339cf9cdd6f6093e844f71742908d08ff09f092e" Nov 21 16:50:03 crc kubenswrapper[4967]: E1121 16:50:03.063499 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"c000e303c48bb3b1c8d40b72339cf9cdd6f6093e844f71742908d08ff09f092e\": container with ID starting with c000e303c48bb3b1c8d40b72339cf9cdd6f6093e844f71742908d08ff09f092e not found: ID does not exist" containerID="c000e303c48bb3b1c8d40b72339cf9cdd6f6093e844f71742908d08ff09f092e" Nov 21 16:50:03 crc kubenswrapper[4967]: I1121 16:50:03.063577 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c000e303c48bb3b1c8d40b72339cf9cdd6f6093e844f71742908d08ff09f092e"} err="failed to get container status \"c000e303c48bb3b1c8d40b72339cf9cdd6f6093e844f71742908d08ff09f092e\": rpc error: code = NotFound desc = could not find container \"c000e303c48bb3b1c8d40b72339cf9cdd6f6093e844f71742908d08ff09f092e\": container with ID starting with c000e303c48bb3b1c8d40b72339cf9cdd6f6093e844f71742908d08ff09f092e not found: ID does not exist" Nov 21 16:50:03 crc kubenswrapper[4967]: I1121 16:50:03.063608 4967 scope.go:117] "RemoveContainer" containerID="e54ba7a28ef78342013894b87c58624becca4db40ef681dca95f6003d9051a0e" Nov 21 16:50:03 crc kubenswrapper[4967]: E1121 16:50:03.064633 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e54ba7a28ef78342013894b87c58624becca4db40ef681dca95f6003d9051a0e\": container with ID starting with e54ba7a28ef78342013894b87c58624becca4db40ef681dca95f6003d9051a0e not found: ID does not exist" containerID="e54ba7a28ef78342013894b87c58624becca4db40ef681dca95f6003d9051a0e" Nov 21 16:50:03 crc kubenswrapper[4967]: I1121 16:50:03.064672 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e54ba7a28ef78342013894b87c58624becca4db40ef681dca95f6003d9051a0e"} err="failed to get container status \"e54ba7a28ef78342013894b87c58624becca4db40ef681dca95f6003d9051a0e\": rpc error: code = NotFound desc = could not find container \"e54ba7a28ef78342013894b87c58624becca4db40ef681dca95f6003d9051a0e\": container with ID starting with e54ba7a28ef78342013894b87c58624becca4db40ef681dca95f6003d9051a0e not found: ID does not exist" Nov 21 16:50:03 crc kubenswrapper[4967]: I1121 16:50:03.210386 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zjpgt"] Nov 21 16:50:03 crc kubenswrapper[4967]: I1121 16:50:03.222866 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zjpgt"] Nov 21 16:50:04 crc kubenswrapper[4967]: I1121 16:50:04.549099 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="059536a7-4148-47e1-a44d-acd83f8acd7b" path="/var/lib/kubelet/pods/059536a7-4148-47e1-a44d-acd83f8acd7b/volumes" Nov 21 16:50:11 crc kubenswrapper[4967]: I1121 16:50:11.537472 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:50:11 crc kubenswrapper[4967]: E1121 16:50:11.538367 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:50:22 crc kubenswrapper[4967]: I1121 16:50:22.551244 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 
16:50:22 crc kubenswrapper[4967]: E1121 16:50:22.552410 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:50:37 crc kubenswrapper[4967]: I1121 16:50:37.536707 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:50:37 crc kubenswrapper[4967]: E1121 16:50:37.537593 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:50:51 crc kubenswrapper[4967]: I1121 16:50:51.538760 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:50:51 crc kubenswrapper[4967]: E1121 16:50:51.540687 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:51:02 crc kubenswrapper[4967]: I1121 16:51:02.545174 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:51:02 crc kubenswrapper[4967]: E1121 16:51:02.546408 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:51:15 crc kubenswrapper[4967]: I1121 16:51:15.536141 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:51:15 crc kubenswrapper[4967]: E1121 16:51:15.537073 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:51:26 crc kubenswrapper[4967]: I1121 16:51:26.536289 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:51:26 crc kubenswrapper[4967]: E1121 16:51:26.537236 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:51:39 crc kubenswrapper[4967]: I1121 16:51:39.538255 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:51:39 crc kubenswrapper[4967]: E1121 16:51:39.539958 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:51:54 crc kubenswrapper[4967]: I1121 16:51:54.537962 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:51:54 crc kubenswrapper[4967]: E1121 16:51:54.539399 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:52:09 crc kubenswrapper[4967]: I1121 16:52:09.536659 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:52:09 crc kubenswrapper[4967]: E1121 16:52:09.537496 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:52:20 crc kubenswrapper[4967]: I1121 16:52:20.536990 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:52:20 crc kubenswrapper[4967]: E1121 16:52:20.548648 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:52:33 crc kubenswrapper[4967]: I1121 16:52:33.537057 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:52:33 crc kubenswrapper[4967]: E1121 16:52:33.538284 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:52:48 crc kubenswrapper[4967]: I1121 16:52:48.537196 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:52:48 crc kubenswrapper[4967]: E1121 16:52:48.538551 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:52:59 crc kubenswrapper[4967]: I1121 16:52:59.537428 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:52:59 crc kubenswrapper[4967]: E1121 16:52:59.538172 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:53:11 crc kubenswrapper[4967]: I1121 16:53:11.537129 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:53:11 crc kubenswrapper[4967]: E1121 16:53:11.537904 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:53:26 crc kubenswrapper[4967]: I1121 16:53:26.537259 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:53:26 crc kubenswrapper[4967]: E1121 16:53:26.538261 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:53:37 crc kubenswrapper[4967]: I1121 16:53:37.536896 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:53:37 crc kubenswrapper[4967]: E1121 16:53:37.538054 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 16:53:49 crc kubenswrapper[4967]: I1121 16:53:49.535931 4967 
scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:53:50 crc kubenswrapper[4967]: I1121 16:53:50.478942 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"49b5a2c57d91b8c638bc293cbe4b1ce02b0eb0de7ec6b9d6607f6ec99935765c"} Nov 21 16:56:16 crc kubenswrapper[4967]: I1121 16:56:16.522396 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:56:16 crc kubenswrapper[4967]: I1121 16:56:16.523397 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:56:46 crc kubenswrapper[4967]: I1121 16:56:46.522369 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:56:46 crc kubenswrapper[4967]: I1121 16:56:46.522917 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:57:16 crc kubenswrapper[4967]: I1121 16:57:16.522345 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:57:16 crc kubenswrapper[4967]: I1121 16:57:16.522958 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:57:16 crc kubenswrapper[4967]: I1121 16:57:16.523019 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 16:57:16 crc kubenswrapper[4967]: I1121 16:57:16.524168 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"49b5a2c57d91b8c638bc293cbe4b1ce02b0eb0de7ec6b9d6607f6ec99935765c"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 16:57:16 crc kubenswrapper[4967]: I1121 16:57:16.524265 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" 
podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://49b5a2c57d91b8c638bc293cbe4b1ce02b0eb0de7ec6b9d6607f6ec99935765c" gracePeriod=600 Nov 21 16:57:16 crc kubenswrapper[4967]: I1121 16:57:16.932301 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="49b5a2c57d91b8c638bc293cbe4b1ce02b0eb0de7ec6b9d6607f6ec99935765c" exitCode=0 Nov 21 16:57:16 crc kubenswrapper[4967]: I1121 16:57:16.932378 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"49b5a2c57d91b8c638bc293cbe4b1ce02b0eb0de7ec6b9d6607f6ec99935765c"} Nov 21 16:57:16 crc kubenswrapper[4967]: I1121 16:57:16.932743 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"} Nov 21 16:57:16 crc kubenswrapper[4967]: I1121 16:57:16.932773 4967 scope.go:117] "RemoveContainer" containerID="7e118692181a9e864ba64b9c2ef07c5f6bbeb646cc7f2b2276a1ed8e36049b52" Nov 21 16:59:16 crc kubenswrapper[4967]: I1121 16:59:16.522114 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:59:16 crc kubenswrapper[4967]: I1121 16:59:16.522682 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:59:44 crc kubenswrapper[4967]: I1121 16:59:44.987839 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vt2vx"] Nov 21 16:59:44 crc kubenswrapper[4967]: E1121 16:59:44.989262 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="059536a7-4148-47e1-a44d-acd83f8acd7b" containerName="extract-utilities" Nov 21 16:59:44 crc kubenswrapper[4967]: I1121 16:59:44.989281 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="059536a7-4148-47e1-a44d-acd83f8acd7b" containerName="extract-utilities" Nov 21 16:59:44 crc kubenswrapper[4967]: E1121 16:59:44.989341 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="059536a7-4148-47e1-a44d-acd83f8acd7b" containerName="extract-content" Nov 21 16:59:44 crc kubenswrapper[4967]: I1121 16:59:44.989350 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="059536a7-4148-47e1-a44d-acd83f8acd7b" containerName="extract-content" Nov 21 16:59:44 crc kubenswrapper[4967]: E1121 16:59:44.989370 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc658808-a977-460d-833e-bdcf52c18e7c" containerName="extract-content" Nov 21 16:59:44 crc kubenswrapper[4967]: I1121 16:59:44.989379 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc658808-a977-460d-833e-bdcf52c18e7c" containerName="extract-content" Nov 21 16:59:44 crc kubenswrapper[4967]: E1121 16:59:44.989400 4967 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="cc658808-a977-460d-833e-bdcf52c18e7c" containerName="registry-server" Nov 21 16:59:44 crc kubenswrapper[4967]: I1121 16:59:44.989408 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc658808-a977-460d-833e-bdcf52c18e7c" containerName="registry-server" Nov 21 16:59:44 crc kubenswrapper[4967]: E1121 16:59:44.989426 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="059536a7-4148-47e1-a44d-acd83f8acd7b" containerName="registry-server" Nov 21 16:59:44 crc kubenswrapper[4967]: I1121 16:59:44.989433 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="059536a7-4148-47e1-a44d-acd83f8acd7b" containerName="registry-server" Nov 21 16:59:44 crc kubenswrapper[4967]: E1121 16:59:44.989449 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc658808-a977-460d-833e-bdcf52c18e7c" containerName="extract-utilities" Nov 21 16:59:44 crc kubenswrapper[4967]: I1121 16:59:44.989458 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc658808-a977-460d-833e-bdcf52c18e7c" containerName="extract-utilities" Nov 21 16:59:44 crc kubenswrapper[4967]: I1121 16:59:44.989771 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc658808-a977-460d-833e-bdcf52c18e7c" containerName="registry-server" Nov 21 16:59:44 crc kubenswrapper[4967]: I1121 16:59:44.989796 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="059536a7-4148-47e1-a44d-acd83f8acd7b" containerName="registry-server" Nov 21 16:59:44 crc kubenswrapper[4967]: I1121 16:59:44.992117 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vt2vx" Nov 21 16:59:45 crc kubenswrapper[4967]: I1121 16:59:45.001692 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vt2vx"] Nov 21 16:59:45 crc kubenswrapper[4967]: I1121 16:59:45.028832 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/619d1079-5810-438a-b50b-b18710cef9ab-utilities\") pod \"redhat-marketplace-vt2vx\" (UID: \"619d1079-5810-438a-b50b-b18710cef9ab\") " pod="openshift-marketplace/redhat-marketplace-vt2vx" Nov 21 16:59:45 crc kubenswrapper[4967]: I1121 16:59:45.028906 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d22m9\" (UniqueName: \"kubernetes.io/projected/619d1079-5810-438a-b50b-b18710cef9ab-kube-api-access-d22m9\") pod \"redhat-marketplace-vt2vx\" (UID: \"619d1079-5810-438a-b50b-b18710cef9ab\") " pod="openshift-marketplace/redhat-marketplace-vt2vx" Nov 21 16:59:45 crc kubenswrapper[4967]: I1121 16:59:45.028986 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/619d1079-5810-438a-b50b-b18710cef9ab-catalog-content\") pod \"redhat-marketplace-vt2vx\" (UID: \"619d1079-5810-438a-b50b-b18710cef9ab\") " pod="openshift-marketplace/redhat-marketplace-vt2vx" Nov 21 16:59:45 crc kubenswrapper[4967]: I1121 16:59:45.130881 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/619d1079-5810-438a-b50b-b18710cef9ab-utilities\") pod \"redhat-marketplace-vt2vx\" (UID: \"619d1079-5810-438a-b50b-b18710cef9ab\") " pod="openshift-marketplace/redhat-marketplace-vt2vx" Nov 21 16:59:45 crc kubenswrapper[4967]: I1121 16:59:45.130972 4967 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d22m9\" (UniqueName: \"kubernetes.io/projected/619d1079-5810-438a-b50b-b18710cef9ab-kube-api-access-d22m9\") pod \"redhat-marketplace-vt2vx\" (UID: \"619d1079-5810-438a-b50b-b18710cef9ab\") " pod="openshift-marketplace/redhat-marketplace-vt2vx" Nov 21 16:59:45 crc kubenswrapper[4967]: I1121 16:59:45.131083 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/619d1079-5810-438a-b50b-b18710cef9ab-catalog-content\") pod \"redhat-marketplace-vt2vx\" (UID: \"619d1079-5810-438a-b50b-b18710cef9ab\") " pod="openshift-marketplace/redhat-marketplace-vt2vx" Nov 21 16:59:45 crc kubenswrapper[4967]: I1121 16:59:45.131717 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/619d1079-5810-438a-b50b-b18710cef9ab-utilities\") pod \"redhat-marketplace-vt2vx\" (UID: \"619d1079-5810-438a-b50b-b18710cef9ab\") " pod="openshift-marketplace/redhat-marketplace-vt2vx" Nov 21 16:59:45 crc kubenswrapper[4967]: I1121 16:59:45.132072 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/619d1079-5810-438a-b50b-b18710cef9ab-catalog-content\") pod \"redhat-marketplace-vt2vx\" (UID: \"619d1079-5810-438a-b50b-b18710cef9ab\") " pod="openshift-marketplace/redhat-marketplace-vt2vx" Nov 21 16:59:45 crc kubenswrapper[4967]: I1121 16:59:45.155517 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d22m9\" (UniqueName: \"kubernetes.io/projected/619d1079-5810-438a-b50b-b18710cef9ab-kube-api-access-d22m9\") pod \"redhat-marketplace-vt2vx\" (UID: \"619d1079-5810-438a-b50b-b18710cef9ab\") " pod="openshift-marketplace/redhat-marketplace-vt2vx" Nov 21 16:59:45 crc kubenswrapper[4967]: I1121 16:59:45.320723 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vt2vx" Nov 21 16:59:45 crc kubenswrapper[4967]: I1121 16:59:45.874211 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vt2vx"] Nov 21 16:59:45 crc kubenswrapper[4967]: W1121 16:59:45.876912 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod619d1079_5810_438a_b50b_b18710cef9ab.slice/crio-c01cd3aafaaf27d9b3b6759c8a912d08a9117be5f8f4a9cd1c5e127493c3aac2 WatchSource:0}: Error finding container c01cd3aafaaf27d9b3b6759c8a912d08a9117be5f8f4a9cd1c5e127493c3aac2: Status 404 returned error can't find the container with id c01cd3aafaaf27d9b3b6759c8a912d08a9117be5f8f4a9cd1c5e127493c3aac2 Nov 21 16:59:46 crc kubenswrapper[4967]: I1121 16:59:46.524778 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 16:59:46 crc kubenswrapper[4967]: I1121 16:59:46.525069 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 16:59:46 crc kubenswrapper[4967]: I1121 16:59:46.677077 4967 generic.go:334] "Generic (PLEG): container finished" podID="619d1079-5810-438a-b50b-b18710cef9ab" containerID="a151752f2a945e81dab83bafafd952112d6974672affb187546478bacc8b8336" exitCode=0 Nov 21 16:59:46 crc kubenswrapper[4967]: I1121 16:59:46.677145 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vt2vx" event={"ID":"619d1079-5810-438a-b50b-b18710cef9ab","Type":"ContainerDied","Data":"a151752f2a945e81dab83bafafd952112d6974672affb187546478bacc8b8336"} Nov 21 16:59:46 crc kubenswrapper[4967]: I1121 16:59:46.677184 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vt2vx" event={"ID":"619d1079-5810-438a-b50b-b18710cef9ab","Type":"ContainerStarted","Data":"c01cd3aafaaf27d9b3b6759c8a912d08a9117be5f8f4a9cd1c5e127493c3aac2"} Nov 21 16:59:46 crc kubenswrapper[4967]: I1121 16:59:46.680654 4967 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 16:59:48 crc kubenswrapper[4967]: I1121 16:59:48.700927 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vt2vx" event={"ID":"619d1079-5810-438a-b50b-b18710cef9ab","Type":"ContainerStarted","Data":"133ffa8678a653089b5dbcda7874019397ded84ec7a589b4b07feb4d8fcd7f7b"} Nov 21 16:59:49 crc kubenswrapper[4967]: I1121 16:59:49.715540 4967 generic.go:334] "Generic (PLEG): container finished" podID="619d1079-5810-438a-b50b-b18710cef9ab" containerID="133ffa8678a653089b5dbcda7874019397ded84ec7a589b4b07feb4d8fcd7f7b" exitCode=0 Nov 21 16:59:49 crc kubenswrapper[4967]: I1121 16:59:49.715645 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vt2vx" event={"ID":"619d1079-5810-438a-b50b-b18710cef9ab","Type":"ContainerDied","Data":"133ffa8678a653089b5dbcda7874019397ded84ec7a589b4b07feb4d8fcd7f7b"} Nov 21 16:59:51 crc kubenswrapper[4967]: I1121 
16:59:51.740502 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vt2vx" event={"ID":"619d1079-5810-438a-b50b-b18710cef9ab","Type":"ContainerStarted","Data":"195f50ad6929b2f4e936d9ceefb285496fe7b208f3dfcabe1003e869d14ba7bf"} Nov 21 16:59:51 crc kubenswrapper[4967]: I1121 16:59:51.765925 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vt2vx" podStartSLOduration=4.274925989 podStartE2EDuration="7.765907102s" podCreationTimestamp="2025-11-21 16:59:44 +0000 UTC" firstStartedPulling="2025-11-21 16:59:46.680290347 +0000 UTC m=+5074.938811355" lastFinishedPulling="2025-11-21 16:59:50.17127146 +0000 UTC m=+5078.429792468" observedRunningTime="2025-11-21 16:59:51.764189282 +0000 UTC m=+5080.022710300" watchObservedRunningTime="2025-11-21 16:59:51.765907102 +0000 UTC m=+5080.024428110" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.378193 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6r8jk"] Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.382611 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6r8jk" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.395921 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6r8jk"] Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.460401 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83c2635f-d413-430d-8a98-1c833f46fc24-utilities\") pod \"certified-operators-6r8jk\" (UID: \"83c2635f-d413-430d-8a98-1c833f46fc24\") " pod="openshift-marketplace/certified-operators-6r8jk" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.460485 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppznk\" (UniqueName: \"kubernetes.io/projected/83c2635f-d413-430d-8a98-1c833f46fc24-kube-api-access-ppznk\") pod \"certified-operators-6r8jk\" (UID: \"83c2635f-d413-430d-8a98-1c833f46fc24\") " pod="openshift-marketplace/certified-operators-6r8jk" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.460810 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83c2635f-d413-430d-8a98-1c833f46fc24-catalog-content\") pod \"certified-operators-6r8jk\" (UID: \"83c2635f-d413-430d-8a98-1c833f46fc24\") " pod="openshift-marketplace/certified-operators-6r8jk" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.564846 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83c2635f-d413-430d-8a98-1c833f46fc24-utilities\") pod \"certified-operators-6r8jk\" (UID: \"83c2635f-d413-430d-8a98-1c833f46fc24\") " pod="openshift-marketplace/certified-operators-6r8jk" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.564923 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppznk\" (UniqueName: \"kubernetes.io/projected/83c2635f-d413-430d-8a98-1c833f46fc24-kube-api-access-ppznk\") pod \"certified-operators-6r8jk\" (UID: \"83c2635f-d413-430d-8a98-1c833f46fc24\") " pod="openshift-marketplace/certified-operators-6r8jk" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.564984 4967 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83c2635f-d413-430d-8a98-1c833f46fc24-catalog-content\") pod \"certified-operators-6r8jk\" (UID: \"83c2635f-d413-430d-8a98-1c833f46fc24\") " pod="openshift-marketplace/certified-operators-6r8jk" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.565683 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83c2635f-d413-430d-8a98-1c833f46fc24-catalog-content\") pod \"certified-operators-6r8jk\" (UID: \"83c2635f-d413-430d-8a98-1c833f46fc24\") " pod="openshift-marketplace/certified-operators-6r8jk" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.565954 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83c2635f-d413-430d-8a98-1c833f46fc24-utilities\") pod \"certified-operators-6r8jk\" (UID: \"83c2635f-d413-430d-8a98-1c833f46fc24\") " pod="openshift-marketplace/certified-operators-6r8jk" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.594557 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-j7bp7"] Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.600095 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j7bp7" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.614747 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppznk\" (UniqueName: \"kubernetes.io/projected/83c2635f-d413-430d-8a98-1c833f46fc24-kube-api-access-ppznk\") pod \"certified-operators-6r8jk\" (UID: \"83c2635f-d413-430d-8a98-1c833f46fc24\") " pod="openshift-marketplace/certified-operators-6r8jk" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.642822 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j7bp7"] Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.669979 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/413bb8cb-da4c-4e01-9e46-938e4f07a256-catalog-content\") pod \"community-operators-j7bp7\" (UID: \"413bb8cb-da4c-4e01-9e46-938e4f07a256\") " pod="openshift-marketplace/community-operators-j7bp7" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.670044 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8zd2l\" (UniqueName: \"kubernetes.io/projected/413bb8cb-da4c-4e01-9e46-938e4f07a256-kube-api-access-8zd2l\") pod \"community-operators-j7bp7\" (UID: \"413bb8cb-da4c-4e01-9e46-938e4f07a256\") " pod="openshift-marketplace/community-operators-j7bp7" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.670105 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/413bb8cb-da4c-4e01-9e46-938e4f07a256-utilities\") pod \"community-operators-j7bp7\" (UID: \"413bb8cb-da4c-4e01-9e46-938e4f07a256\") " pod="openshift-marketplace/community-operators-j7bp7" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.713955 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6r8jk" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.775282 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/413bb8cb-da4c-4e01-9e46-938e4f07a256-catalog-content\") pod \"community-operators-j7bp7\" (UID: \"413bb8cb-da4c-4e01-9e46-938e4f07a256\") " pod="openshift-marketplace/community-operators-j7bp7" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.774746 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/413bb8cb-da4c-4e01-9e46-938e4f07a256-catalog-content\") pod \"community-operators-j7bp7\" (UID: \"413bb8cb-da4c-4e01-9e46-938e4f07a256\") " pod="openshift-marketplace/community-operators-j7bp7" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.775394 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8zd2l\" (UniqueName: \"kubernetes.io/projected/413bb8cb-da4c-4e01-9e46-938e4f07a256-kube-api-access-8zd2l\") pod \"community-operators-j7bp7\" (UID: \"413bb8cb-da4c-4e01-9e46-938e4f07a256\") " pod="openshift-marketplace/community-operators-j7bp7" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.775440 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/413bb8cb-da4c-4e01-9e46-938e4f07a256-utilities\") pod \"community-operators-j7bp7\" (UID: \"413bb8cb-da4c-4e01-9e46-938e4f07a256\") " pod="openshift-marketplace/community-operators-j7bp7" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.775929 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/413bb8cb-da4c-4e01-9e46-938e4f07a256-utilities\") pod \"community-operators-j7bp7\" (UID: \"413bb8cb-da4c-4e01-9e46-938e4f07a256\") " pod="openshift-marketplace/community-operators-j7bp7" Nov 21 16:59:53 crc kubenswrapper[4967]: I1121 16:59:53.802581 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8zd2l\" (UniqueName: \"kubernetes.io/projected/413bb8cb-da4c-4e01-9e46-938e4f07a256-kube-api-access-8zd2l\") pod \"community-operators-j7bp7\" (UID: \"413bb8cb-da4c-4e01-9e46-938e4f07a256\") " pod="openshift-marketplace/community-operators-j7bp7" Nov 21 16:59:54 crc kubenswrapper[4967]: I1121 16:59:54.003901 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-j7bp7" Nov 21 16:59:54 crc kubenswrapper[4967]: I1121 16:59:54.472836 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6r8jk"] Nov 21 16:59:54 crc kubenswrapper[4967]: I1121 16:59:54.776240 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6r8jk" event={"ID":"83c2635f-d413-430d-8a98-1c833f46fc24","Type":"ContainerStarted","Data":"b86520da6b4e99c7914e6766bea12020f6e234191823750a9bb211c5574ba42f"} Nov 21 16:59:54 crc kubenswrapper[4967]: I1121 16:59:54.879371 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j7bp7"] Nov 21 16:59:54 crc kubenswrapper[4967]: W1121 16:59:54.884122 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod413bb8cb_da4c_4e01_9e46_938e4f07a256.slice/crio-88e018c8e07ddf29f6a161b3f4ad98f9551a29912d5e1db8a347b93bf191be59 WatchSource:0}: Error finding container 88e018c8e07ddf29f6a161b3f4ad98f9551a29912d5e1db8a347b93bf191be59: Status 404 returned error can't find the container with id 88e018c8e07ddf29f6a161b3f4ad98f9551a29912d5e1db8a347b93bf191be59 Nov 21 16:59:55 crc kubenswrapper[4967]: I1121 16:59:55.322096 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vt2vx" Nov 21 16:59:55 crc kubenswrapper[4967]: I1121 16:59:55.322160 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vt2vx" Nov 21 16:59:55 crc kubenswrapper[4967]: I1121 16:59:55.386801 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vt2vx" Nov 21 16:59:55 crc kubenswrapper[4967]: I1121 16:59:55.793831 4967 generic.go:334] "Generic (PLEG): container finished" podID="413bb8cb-da4c-4e01-9e46-938e4f07a256" containerID="4b971fbc64144bdc361f07a303b905a6233c8bff5ed92a3b48c71aa7ce1cd1b0" exitCode=0 Nov 21 16:59:55 crc kubenswrapper[4967]: I1121 16:59:55.793919 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j7bp7" event={"ID":"413bb8cb-da4c-4e01-9e46-938e4f07a256","Type":"ContainerDied","Data":"4b971fbc64144bdc361f07a303b905a6233c8bff5ed92a3b48c71aa7ce1cd1b0"} Nov 21 16:59:55 crc kubenswrapper[4967]: I1121 16:59:55.795803 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j7bp7" event={"ID":"413bb8cb-da4c-4e01-9e46-938e4f07a256","Type":"ContainerStarted","Data":"88e018c8e07ddf29f6a161b3f4ad98f9551a29912d5e1db8a347b93bf191be59"} Nov 21 16:59:55 crc kubenswrapper[4967]: I1121 16:59:55.797974 4967 generic.go:334] "Generic (PLEG): container finished" podID="83c2635f-d413-430d-8a98-1c833f46fc24" containerID="3ae33f1abf7e5ce67d8cd1fa76e01b52f8ca24e83e3fe4610a1dc4fdd3aff6b9" exitCode=0 Nov 21 16:59:55 crc kubenswrapper[4967]: I1121 16:59:55.799358 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6r8jk" event={"ID":"83c2635f-d413-430d-8a98-1c833f46fc24","Type":"ContainerDied","Data":"3ae33f1abf7e5ce67d8cd1fa76e01b52f8ca24e83e3fe4610a1dc4fdd3aff6b9"} Nov 21 16:59:55 crc kubenswrapper[4967]: I1121 16:59:55.978739 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5rhlb"] Nov 21 16:59:55 crc kubenswrapper[4967]: I1121 
16:59:55.990758 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5rhlb" Nov 21 16:59:55 crc kubenswrapper[4967]: I1121 16:59:55.991572 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5rhlb"] Nov 21 16:59:56 crc kubenswrapper[4967]: I1121 16:59:56.058306 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e1934ba-08f4-40a8-a038-59c57930e10f-catalog-content\") pod \"redhat-operators-5rhlb\" (UID: \"2e1934ba-08f4-40a8-a038-59c57930e10f\") " pod="openshift-marketplace/redhat-operators-5rhlb" Nov 21 16:59:56 crc kubenswrapper[4967]: I1121 16:59:56.058417 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fql9r\" (UniqueName: \"kubernetes.io/projected/2e1934ba-08f4-40a8-a038-59c57930e10f-kube-api-access-fql9r\") pod \"redhat-operators-5rhlb\" (UID: \"2e1934ba-08f4-40a8-a038-59c57930e10f\") " pod="openshift-marketplace/redhat-operators-5rhlb" Nov 21 16:59:56 crc kubenswrapper[4967]: I1121 16:59:56.058905 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e1934ba-08f4-40a8-a038-59c57930e10f-utilities\") pod \"redhat-operators-5rhlb\" (UID: \"2e1934ba-08f4-40a8-a038-59c57930e10f\") " pod="openshift-marketplace/redhat-operators-5rhlb" Nov 21 16:59:56 crc kubenswrapper[4967]: I1121 16:59:56.161997 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e1934ba-08f4-40a8-a038-59c57930e10f-catalog-content\") pod \"redhat-operators-5rhlb\" (UID: \"2e1934ba-08f4-40a8-a038-59c57930e10f\") " pod="openshift-marketplace/redhat-operators-5rhlb" Nov 21 16:59:56 crc kubenswrapper[4967]: I1121 16:59:56.162123 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fql9r\" (UniqueName: \"kubernetes.io/projected/2e1934ba-08f4-40a8-a038-59c57930e10f-kube-api-access-fql9r\") pod \"redhat-operators-5rhlb\" (UID: \"2e1934ba-08f4-40a8-a038-59c57930e10f\") " pod="openshift-marketplace/redhat-operators-5rhlb" Nov 21 16:59:56 crc kubenswrapper[4967]: I1121 16:59:56.162401 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e1934ba-08f4-40a8-a038-59c57930e10f-utilities\") pod \"redhat-operators-5rhlb\" (UID: \"2e1934ba-08f4-40a8-a038-59c57930e10f\") " pod="openshift-marketplace/redhat-operators-5rhlb" Nov 21 16:59:56 crc kubenswrapper[4967]: I1121 16:59:56.162544 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e1934ba-08f4-40a8-a038-59c57930e10f-catalog-content\") pod \"redhat-operators-5rhlb\" (UID: \"2e1934ba-08f4-40a8-a038-59c57930e10f\") " pod="openshift-marketplace/redhat-operators-5rhlb" Nov 21 16:59:56 crc kubenswrapper[4967]: I1121 16:59:56.162931 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e1934ba-08f4-40a8-a038-59c57930e10f-utilities\") pod \"redhat-operators-5rhlb\" (UID: \"2e1934ba-08f4-40a8-a038-59c57930e10f\") " pod="openshift-marketplace/redhat-operators-5rhlb" Nov 21 16:59:56 crc kubenswrapper[4967]: I1121 16:59:56.184658 4967 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fql9r\" (UniqueName: \"kubernetes.io/projected/2e1934ba-08f4-40a8-a038-59c57930e10f-kube-api-access-fql9r\") pod \"redhat-operators-5rhlb\" (UID: \"2e1934ba-08f4-40a8-a038-59c57930e10f\") " pod="openshift-marketplace/redhat-operators-5rhlb" Nov 21 16:59:56 crc kubenswrapper[4967]: I1121 16:59:56.330817 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5rhlb" Nov 21 16:59:56 crc kubenswrapper[4967]: I1121 16:59:56.874865 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5rhlb"] Nov 21 16:59:57 crc kubenswrapper[4967]: I1121 16:59:57.820710 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j7bp7" event={"ID":"413bb8cb-da4c-4e01-9e46-938e4f07a256","Type":"ContainerStarted","Data":"1383d0f87e93711dcda04efd0ab9b35b602449e43904b0fbc1f541f1b241ff3e"} Nov 21 16:59:57 crc kubenswrapper[4967]: I1121 16:59:57.823779 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6r8jk" event={"ID":"83c2635f-d413-430d-8a98-1c833f46fc24","Type":"ContainerStarted","Data":"fc7e55929a1549e38e59a5a7c474c6b284960560683ac5a2ac6ce6f3b6577261"} Nov 21 16:59:57 crc kubenswrapper[4967]: I1121 16:59:57.826312 4967 generic.go:334] "Generic (PLEG): container finished" podID="2e1934ba-08f4-40a8-a038-59c57930e10f" containerID="51bbf5da60ab0278b159ff1523b72d3720c972de3a5331b2cb4bfa8362b47606" exitCode=0 Nov 21 16:59:57 crc kubenswrapper[4967]: I1121 16:59:57.826378 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rhlb" event={"ID":"2e1934ba-08f4-40a8-a038-59c57930e10f","Type":"ContainerDied","Data":"51bbf5da60ab0278b159ff1523b72d3720c972de3a5331b2cb4bfa8362b47606"} Nov 21 16:59:57 crc kubenswrapper[4967]: I1121 16:59:57.826408 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rhlb" event={"ID":"2e1934ba-08f4-40a8-a038-59c57930e10f","Type":"ContainerStarted","Data":"2de7d4ee5ab3c7395be6fc682e1d3254df8d8b63d14f349b5a855b403c6e2c76"} Nov 21 16:59:58 crc kubenswrapper[4967]: I1121 16:59:58.849076 4967 generic.go:334] "Generic (PLEG): container finished" podID="83c2635f-d413-430d-8a98-1c833f46fc24" containerID="fc7e55929a1549e38e59a5a7c474c6b284960560683ac5a2ac6ce6f3b6577261" exitCode=0 Nov 21 16:59:58 crc kubenswrapper[4967]: I1121 16:59:58.849134 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6r8jk" event={"ID":"83c2635f-d413-430d-8a98-1c833f46fc24","Type":"ContainerDied","Data":"fc7e55929a1549e38e59a5a7c474c6b284960560683ac5a2ac6ce6f3b6577261"} Nov 21 16:59:59 crc kubenswrapper[4967]: I1121 16:59:59.862306 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6r8jk" event={"ID":"83c2635f-d413-430d-8a98-1c833f46fc24","Type":"ContainerStarted","Data":"47ddb0e4a4c50add8c526818049248f55c5a864bfe2be544933a3c80fa8eb4c2"} Nov 21 16:59:59 crc kubenswrapper[4967]: I1121 16:59:59.866772 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rhlb" event={"ID":"2e1934ba-08f4-40a8-a038-59c57930e10f","Type":"ContainerStarted","Data":"6c2f5370108495eb127fecfc0c7a2fd995be246cf300075c6aac9fc68e06ac98"} Nov 21 16:59:59 crc kubenswrapper[4967]: I1121 16:59:59.869483 4967 generic.go:334] "Generic (PLEG): 
container finished" podID="413bb8cb-da4c-4e01-9e46-938e4f07a256" containerID="1383d0f87e93711dcda04efd0ab9b35b602449e43904b0fbc1f541f1b241ff3e" exitCode=0 Nov 21 16:59:59 crc kubenswrapper[4967]: I1121 16:59:59.869527 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j7bp7" event={"ID":"413bb8cb-da4c-4e01-9e46-938e4f07a256","Type":"ContainerDied","Data":"1383d0f87e93711dcda04efd0ab9b35b602449e43904b0fbc1f541f1b241ff3e"} Nov 21 16:59:59 crc kubenswrapper[4967]: I1121 16:59:59.894318 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6r8jk" podStartSLOduration=3.220803942 podStartE2EDuration="6.894289362s" podCreationTimestamp="2025-11-21 16:59:53 +0000 UTC" firstStartedPulling="2025-11-21 16:59:55.801726629 +0000 UTC m=+5084.060247637" lastFinishedPulling="2025-11-21 16:59:59.475212049 +0000 UTC m=+5087.733733057" observedRunningTime="2025-11-21 16:59:59.890897304 +0000 UTC m=+5088.149418322" watchObservedRunningTime="2025-11-21 16:59:59.894289362 +0000 UTC m=+5088.152810370" Nov 21 17:00:00 crc kubenswrapper[4967]: I1121 17:00:00.162964 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq"] Nov 21 17:00:00 crc kubenswrapper[4967]: I1121 17:00:00.165688 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq" Nov 21 17:00:00 crc kubenswrapper[4967]: I1121 17:00:00.170402 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 17:00:00 crc kubenswrapper[4967]: I1121 17:00:00.176454 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 17:00:00 crc kubenswrapper[4967]: I1121 17:00:00.197352 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq"] Nov 21 17:00:00 crc kubenswrapper[4967]: I1121 17:00:00.281643 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f288f452-99e9-4291-8f01-737ffb7610bd-secret-volume\") pod \"collect-profiles-29395740-mcstq\" (UID: \"f288f452-99e9-4291-8f01-737ffb7610bd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq" Nov 21 17:00:00 crc kubenswrapper[4967]: I1121 17:00:00.281760 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8h2d\" (UniqueName: \"kubernetes.io/projected/f288f452-99e9-4291-8f01-737ffb7610bd-kube-api-access-f8h2d\") pod \"collect-profiles-29395740-mcstq\" (UID: \"f288f452-99e9-4291-8f01-737ffb7610bd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq" Nov 21 17:00:00 crc kubenswrapper[4967]: I1121 17:00:00.281788 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f288f452-99e9-4291-8f01-737ffb7610bd-config-volume\") pod \"collect-profiles-29395740-mcstq\" (UID: \"f288f452-99e9-4291-8f01-737ffb7610bd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq" Nov 21 17:00:00 crc kubenswrapper[4967]: I1121 17:00:00.383727 4967 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f288f452-99e9-4291-8f01-737ffb7610bd-secret-volume\") pod \"collect-profiles-29395740-mcstq\" (UID: \"f288f452-99e9-4291-8f01-737ffb7610bd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq" Nov 21 17:00:00 crc kubenswrapper[4967]: I1121 17:00:00.383854 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8h2d\" (UniqueName: \"kubernetes.io/projected/f288f452-99e9-4291-8f01-737ffb7610bd-kube-api-access-f8h2d\") pod \"collect-profiles-29395740-mcstq\" (UID: \"f288f452-99e9-4291-8f01-737ffb7610bd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq" Nov 21 17:00:00 crc kubenswrapper[4967]: I1121 17:00:00.383892 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f288f452-99e9-4291-8f01-737ffb7610bd-config-volume\") pod \"collect-profiles-29395740-mcstq\" (UID: \"f288f452-99e9-4291-8f01-737ffb7610bd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq" Nov 21 17:00:00 crc kubenswrapper[4967]: I1121 17:00:00.384825 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f288f452-99e9-4291-8f01-737ffb7610bd-config-volume\") pod \"collect-profiles-29395740-mcstq\" (UID: \"f288f452-99e9-4291-8f01-737ffb7610bd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq" Nov 21 17:00:00 crc kubenswrapper[4967]: I1121 17:00:00.391037 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f288f452-99e9-4291-8f01-737ffb7610bd-secret-volume\") pod \"collect-profiles-29395740-mcstq\" (UID: \"f288f452-99e9-4291-8f01-737ffb7610bd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq" Nov 21 17:00:00 crc kubenswrapper[4967]: I1121 17:00:00.400193 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8h2d\" (UniqueName: \"kubernetes.io/projected/f288f452-99e9-4291-8f01-737ffb7610bd-kube-api-access-f8h2d\") pod \"collect-profiles-29395740-mcstq\" (UID: \"f288f452-99e9-4291-8f01-737ffb7610bd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq" Nov 21 17:00:00 crc kubenswrapper[4967]: I1121 17:00:00.492727 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq" Nov 21 17:00:00 crc kubenswrapper[4967]: I1121 17:00:00.887840 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j7bp7" event={"ID":"413bb8cb-da4c-4e01-9e46-938e4f07a256","Type":"ContainerStarted","Data":"d6fe2d44b6b82b36c0a88dcefe786c6ad0ebe453afd96b37a5e5dad9bf01b20b"} Nov 21 17:00:00 crc kubenswrapper[4967]: I1121 17:00:00.911557 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-j7bp7" podStartSLOduration=3.4168479449999998 podStartE2EDuration="7.911531616s" podCreationTimestamp="2025-11-21 16:59:53 +0000 UTC" firstStartedPulling="2025-11-21 16:59:55.798542737 +0000 UTC m=+5084.057063785" lastFinishedPulling="2025-11-21 17:00:00.293226448 +0000 UTC m=+5088.551747456" observedRunningTime="2025-11-21 17:00:00.904662799 +0000 UTC m=+5089.163183817" watchObservedRunningTime="2025-11-21 17:00:00.911531616 +0000 UTC m=+5089.170052624" Nov 21 17:00:00 crc kubenswrapper[4967]: I1121 17:00:00.961864 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq"] Nov 21 17:00:01 crc kubenswrapper[4967]: W1121 17:00:01.521591 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf288f452_99e9_4291_8f01_737ffb7610bd.slice/crio-ac3a486108dc747b8b5fdfe56d117fbea7888bbf91ebaf8b2b4db7ded42c54c0 WatchSource:0}: Error finding container ac3a486108dc747b8b5fdfe56d117fbea7888bbf91ebaf8b2b4db7ded42c54c0: Status 404 returned error can't find the container with id ac3a486108dc747b8b5fdfe56d117fbea7888bbf91ebaf8b2b4db7ded42c54c0 Nov 21 17:00:01 crc kubenswrapper[4967]: I1121 17:00:01.900375 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq" event={"ID":"f288f452-99e9-4291-8f01-737ffb7610bd","Type":"ContainerStarted","Data":"0dce33fc76bce54d70e276140e9f003c1001bbe3e3857b80e9f3eef4d4a1fe9f"} Nov 21 17:00:01 crc kubenswrapper[4967]: I1121 17:00:01.900824 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq" event={"ID":"f288f452-99e9-4291-8f01-737ffb7610bd","Type":"ContainerStarted","Data":"ac3a486108dc747b8b5fdfe56d117fbea7888bbf91ebaf8b2b4db7ded42c54c0"} Nov 21 17:00:01 crc kubenswrapper[4967]: I1121 17:00:01.965514 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq" podStartSLOduration=1.965485402 podStartE2EDuration="1.965485402s" podCreationTimestamp="2025-11-21 17:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 17:00:01.918158077 +0000 UTC m=+5090.176679085" watchObservedRunningTime="2025-11-21 17:00:01.965485402 +0000 UTC m=+5090.224006410" Nov 21 17:00:02 crc kubenswrapper[4967]: I1121 17:00:02.918444 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rhlb" event={"ID":"2e1934ba-08f4-40a8-a038-59c57930e10f","Type":"ContainerDied","Data":"6c2f5370108495eb127fecfc0c7a2fd995be246cf300075c6aac9fc68e06ac98"} Nov 21 17:00:02 crc kubenswrapper[4967]: I1121 17:00:02.918720 4967 generic.go:334] "Generic (PLEG): container finished" 
podID="2e1934ba-08f4-40a8-a038-59c57930e10f" containerID="6c2f5370108495eb127fecfc0c7a2fd995be246cf300075c6aac9fc68e06ac98" exitCode=0 Nov 21 17:00:02 crc kubenswrapper[4967]: I1121 17:00:02.925906 4967 generic.go:334] "Generic (PLEG): container finished" podID="f288f452-99e9-4291-8f01-737ffb7610bd" containerID="0dce33fc76bce54d70e276140e9f003c1001bbe3e3857b80e9f3eef4d4a1fe9f" exitCode=0 Nov 21 17:00:02 crc kubenswrapper[4967]: I1121 17:00:02.925949 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq" event={"ID":"f288f452-99e9-4291-8f01-737ffb7610bd","Type":"ContainerDied","Data":"0dce33fc76bce54d70e276140e9f003c1001bbe3e3857b80e9f3eef4d4a1fe9f"} Nov 21 17:00:03 crc kubenswrapper[4967]: I1121 17:00:03.714440 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6r8jk" Nov 21 17:00:03 crc kubenswrapper[4967]: I1121 17:00:03.715025 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6r8jk" Nov 21 17:00:04 crc kubenswrapper[4967]: I1121 17:00:04.005099 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-j7bp7" Nov 21 17:00:04 crc kubenswrapper[4967]: I1121 17:00:04.006412 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-j7bp7" Nov 21 17:00:04 crc kubenswrapper[4967]: I1121 17:00:04.430733 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq" Nov 21 17:00:04 crc kubenswrapper[4967]: I1121 17:00:04.506724 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f288f452-99e9-4291-8f01-737ffb7610bd-config-volume\") pod \"f288f452-99e9-4291-8f01-737ffb7610bd\" (UID: \"f288f452-99e9-4291-8f01-737ffb7610bd\") " Nov 21 17:00:04 crc kubenswrapper[4967]: I1121 17:00:04.506804 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8h2d\" (UniqueName: \"kubernetes.io/projected/f288f452-99e9-4291-8f01-737ffb7610bd-kube-api-access-f8h2d\") pod \"f288f452-99e9-4291-8f01-737ffb7610bd\" (UID: \"f288f452-99e9-4291-8f01-737ffb7610bd\") " Nov 21 17:00:04 crc kubenswrapper[4967]: I1121 17:00:04.506975 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f288f452-99e9-4291-8f01-737ffb7610bd-secret-volume\") pod \"f288f452-99e9-4291-8f01-737ffb7610bd\" (UID: \"f288f452-99e9-4291-8f01-737ffb7610bd\") " Nov 21 17:00:04 crc kubenswrapper[4967]: I1121 17:00:04.507837 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f288f452-99e9-4291-8f01-737ffb7610bd-config-volume" (OuterVolumeSpecName: "config-volume") pod "f288f452-99e9-4291-8f01-737ffb7610bd" (UID: "f288f452-99e9-4291-8f01-737ffb7610bd"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 17:00:04 crc kubenswrapper[4967]: I1121 17:00:04.514772 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f288f452-99e9-4291-8f01-737ffb7610bd-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f288f452-99e9-4291-8f01-737ffb7610bd" (UID: "f288f452-99e9-4291-8f01-737ffb7610bd"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 17:00:04 crc kubenswrapper[4967]: I1121 17:00:04.520146 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f288f452-99e9-4291-8f01-737ffb7610bd-kube-api-access-f8h2d" (OuterVolumeSpecName: "kube-api-access-f8h2d") pod "f288f452-99e9-4291-8f01-737ffb7610bd" (UID: "f288f452-99e9-4291-8f01-737ffb7610bd"). InnerVolumeSpecName "kube-api-access-f8h2d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:00:04 crc kubenswrapper[4967]: I1121 17:00:04.610408 4967 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f288f452-99e9-4291-8f01-737ffb7610bd-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 17:00:04 crc kubenswrapper[4967]: I1121 17:00:04.610439 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8h2d\" (UniqueName: \"kubernetes.io/projected/f288f452-99e9-4291-8f01-737ffb7610bd-kube-api-access-f8h2d\") on node \"crc\" DevicePath \"\"" Nov 21 17:00:04 crc kubenswrapper[4967]: I1121 17:00:04.610449 4967 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f288f452-99e9-4291-8f01-737ffb7610bd-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 17:00:04 crc kubenswrapper[4967]: I1121 17:00:04.956355 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rhlb" event={"ID":"2e1934ba-08f4-40a8-a038-59c57930e10f","Type":"ContainerStarted","Data":"f1a39977e17823197a9693b7e8eda6bd231d48a4e2e04246cfc58406bf746c79"} Nov 21 17:00:04 crc kubenswrapper[4967]: I1121 17:00:04.963210 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq" Nov 21 17:00:04 crc kubenswrapper[4967]: I1121 17:00:04.963189 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq" event={"ID":"f288f452-99e9-4291-8f01-737ffb7610bd","Type":"ContainerDied","Data":"ac3a486108dc747b8b5fdfe56d117fbea7888bbf91ebaf8b2b4db7ded42c54c0"} Nov 21 17:00:04 crc kubenswrapper[4967]: I1121 17:00:04.963381 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ac3a486108dc747b8b5fdfe56d117fbea7888bbf91ebaf8b2b4db7ded42c54c0" Nov 21 17:00:04 crc kubenswrapper[4967]: I1121 17:00:04.984042 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5rhlb" podStartSLOduration=4.031274604 podStartE2EDuration="9.984026194s" podCreationTimestamp="2025-11-21 16:59:55 +0000 UTC" firstStartedPulling="2025-11-21 16:59:57.82897491 +0000 UTC m=+5086.087495918" lastFinishedPulling="2025-11-21 17:00:03.7817265 +0000 UTC m=+5092.040247508" observedRunningTime="2025-11-21 17:00:04.982000266 +0000 UTC m=+5093.240521274" watchObservedRunningTime="2025-11-21 17:00:04.984026194 +0000 UTC m=+5093.242547202" Nov 21 17:00:05 crc kubenswrapper[4967]: I1121 17:00:05.034368 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh"] Nov 21 17:00:05 crc kubenswrapper[4967]: I1121 17:00:05.046467 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395695-476vh"] Nov 21 17:00:05 crc kubenswrapper[4967]: I1121 17:00:05.054054 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-6r8jk" podUID="83c2635f-d413-430d-8a98-1c833f46fc24" containerName="registry-server" probeResult="failure" output=< Nov 21 17:00:05 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 17:00:05 crc kubenswrapper[4967]: > Nov 21 17:00:05 crc kubenswrapper[4967]: I1121 17:00:05.071221 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-j7bp7" podUID="413bb8cb-da4c-4e01-9e46-938e4f07a256" containerName="registry-server" probeResult="failure" output=< Nov 21 17:00:05 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 17:00:05 crc kubenswrapper[4967]: > Nov 21 17:00:05 crc kubenswrapper[4967]: I1121 17:00:05.381895 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vt2vx" Nov 21 17:00:06 crc kubenswrapper[4967]: I1121 17:00:06.331712 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5rhlb" Nov 21 17:00:06 crc kubenswrapper[4967]: I1121 17:00:06.331775 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5rhlb" Nov 21 17:00:06 crc kubenswrapper[4967]: I1121 17:00:06.569516 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86aecfaf-c4d7-408b-85f2-fa9a09152d7d" path="/var/lib/kubelet/pods/86aecfaf-c4d7-408b-85f2-fa9a09152d7d/volumes" Nov 21 17:00:07 crc kubenswrapper[4967]: I1121 17:00:07.362198 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vt2vx"] Nov 21 17:00:07 crc kubenswrapper[4967]: I1121 
Nov 21 17:00:07 crc kubenswrapper[4967]: I1121 17:00:07.383029 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5rhlb" podUID="2e1934ba-08f4-40a8-a038-59c57930e10f" containerName="registry-server" probeResult="failure" output=<
Nov 21 17:00:07 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s
Nov 21 17:00:07 crc kubenswrapper[4967]: >
Nov 21 17:00:07 crc kubenswrapper[4967]: I1121 17:00:07.939584 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vt2vx"
Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.002295 4967 generic.go:334] "Generic (PLEG): container finished" podID="619d1079-5810-438a-b50b-b18710cef9ab" containerID="195f50ad6929b2f4e936d9ceefb285496fe7b208f3dfcabe1003e869d14ba7bf" exitCode=0
Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.002358 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vt2vx" event={"ID":"619d1079-5810-438a-b50b-b18710cef9ab","Type":"ContainerDied","Data":"195f50ad6929b2f4e936d9ceefb285496fe7b208f3dfcabe1003e869d14ba7bf"}
Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.002430 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vt2vx" event={"ID":"619d1079-5810-438a-b50b-b18710cef9ab","Type":"ContainerDied","Data":"c01cd3aafaaf27d9b3b6759c8a912d08a9117be5f8f4a9cd1c5e127493c3aac2"}
Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.002439 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vt2vx"
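Note: gracePeriod=2 in the "Killing container" entry above is the pod's termination grace period: the runtime signals the container to stop, waits up to that long, then force-kills. The ContainerDied events that follow show the registry-server exiting cleanly inside the window (exitCode=0). A toy simulation of that sequence, not CRI-O code:

package main

import (
	"fmt"
	"time"
)

// killWithGrace models SIGTERM-then-SIGKILL: signal, wait up to the
// grace period for the container to exit, force-kill otherwise.
func killWithGrace(stopped <-chan struct{}, grace time.Duration) {
	fmt.Println("SIGTERM sent")
	select {
	case <-stopped:
		fmt.Println("container exited within grace period")
	case <-time.After(grace):
		fmt.Println("grace period expired, SIGKILL sent")
	}
}

func main() {
	stopped := make(chan struct{})
	go func() { time.Sleep(500 * time.Millisecond); close(stopped) }()
	killWithGrace(stopped, 2*time.Second) // gracePeriod=2, as in the log
}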
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vt2vx" Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.002458 4967 scope.go:117] "RemoveContainer" containerID="195f50ad6929b2f4e936d9ceefb285496fe7b208f3dfcabe1003e869d14ba7bf" Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.005354 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/619d1079-5810-438a-b50b-b18710cef9ab-utilities\") pod \"619d1079-5810-438a-b50b-b18710cef9ab\" (UID: \"619d1079-5810-438a-b50b-b18710cef9ab\") " Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.005480 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d22m9\" (UniqueName: \"kubernetes.io/projected/619d1079-5810-438a-b50b-b18710cef9ab-kube-api-access-d22m9\") pod \"619d1079-5810-438a-b50b-b18710cef9ab\" (UID: \"619d1079-5810-438a-b50b-b18710cef9ab\") " Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.006026 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/619d1079-5810-438a-b50b-b18710cef9ab-catalog-content\") pod \"619d1079-5810-438a-b50b-b18710cef9ab\" (UID: \"619d1079-5810-438a-b50b-b18710cef9ab\") " Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.015657 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/619d1079-5810-438a-b50b-b18710cef9ab-utilities" (OuterVolumeSpecName: "utilities") pod "619d1079-5810-438a-b50b-b18710cef9ab" (UID: "619d1079-5810-438a-b50b-b18710cef9ab"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.018955 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/619d1079-5810-438a-b50b-b18710cef9ab-kube-api-access-d22m9" (OuterVolumeSpecName: "kube-api-access-d22m9") pod "619d1079-5810-438a-b50b-b18710cef9ab" (UID: "619d1079-5810-438a-b50b-b18710cef9ab"). InnerVolumeSpecName "kube-api-access-d22m9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.022693 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/619d1079-5810-438a-b50b-b18710cef9ab-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "619d1079-5810-438a-b50b-b18710cef9ab" (UID: "619d1079-5810-438a-b50b-b18710cef9ab"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.031096 4967 scope.go:117] "RemoveContainer" containerID="133ffa8678a653089b5dbcda7874019397ded84ec7a589b4b07feb4d8fcd7f7b" Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.090943 4967 scope.go:117] "RemoveContainer" containerID="a151752f2a945e81dab83bafafd952112d6974672affb187546478bacc8b8336" Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.110833 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/619d1079-5810-438a-b50b-b18710cef9ab-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.110876 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d22m9\" (UniqueName: \"kubernetes.io/projected/619d1079-5810-438a-b50b-b18710cef9ab-kube-api-access-d22m9\") on node \"crc\" DevicePath \"\"" Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.110893 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/619d1079-5810-438a-b50b-b18710cef9ab-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.148146 4967 scope.go:117] "RemoveContainer" containerID="195f50ad6929b2f4e936d9ceefb285496fe7b208f3dfcabe1003e869d14ba7bf" Nov 21 17:00:08 crc kubenswrapper[4967]: E1121 17:00:08.148724 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"195f50ad6929b2f4e936d9ceefb285496fe7b208f3dfcabe1003e869d14ba7bf\": container with ID starting with 195f50ad6929b2f4e936d9ceefb285496fe7b208f3dfcabe1003e869d14ba7bf not found: ID does not exist" containerID="195f50ad6929b2f4e936d9ceefb285496fe7b208f3dfcabe1003e869d14ba7bf" Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.148762 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"195f50ad6929b2f4e936d9ceefb285496fe7b208f3dfcabe1003e869d14ba7bf"} err="failed to get container status \"195f50ad6929b2f4e936d9ceefb285496fe7b208f3dfcabe1003e869d14ba7bf\": rpc error: code = NotFound desc = could not find container \"195f50ad6929b2f4e936d9ceefb285496fe7b208f3dfcabe1003e869d14ba7bf\": container with ID starting with 195f50ad6929b2f4e936d9ceefb285496fe7b208f3dfcabe1003e869d14ba7bf not found: ID does not exist" Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.148792 4967 scope.go:117] "RemoveContainer" containerID="133ffa8678a653089b5dbcda7874019397ded84ec7a589b4b07feb4d8fcd7f7b" Nov 21 17:00:08 crc kubenswrapper[4967]: E1121 17:00:08.149115 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"133ffa8678a653089b5dbcda7874019397ded84ec7a589b4b07feb4d8fcd7f7b\": container with ID starting with 133ffa8678a653089b5dbcda7874019397ded84ec7a589b4b07feb4d8fcd7f7b not found: ID does not exist" containerID="133ffa8678a653089b5dbcda7874019397ded84ec7a589b4b07feb4d8fcd7f7b" Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.149142 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"133ffa8678a653089b5dbcda7874019397ded84ec7a589b4b07feb4d8fcd7f7b"} err="failed to get container status \"133ffa8678a653089b5dbcda7874019397ded84ec7a589b4b07feb4d8fcd7f7b\": rpc error: code = NotFound desc = could not find container 
\"133ffa8678a653089b5dbcda7874019397ded84ec7a589b4b07feb4d8fcd7f7b\": container with ID starting with 133ffa8678a653089b5dbcda7874019397ded84ec7a589b4b07feb4d8fcd7f7b not found: ID does not exist" Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.149164 4967 scope.go:117] "RemoveContainer" containerID="a151752f2a945e81dab83bafafd952112d6974672affb187546478bacc8b8336" Nov 21 17:00:08 crc kubenswrapper[4967]: E1121 17:00:08.149660 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a151752f2a945e81dab83bafafd952112d6974672affb187546478bacc8b8336\": container with ID starting with a151752f2a945e81dab83bafafd952112d6974672affb187546478bacc8b8336 not found: ID does not exist" containerID="a151752f2a945e81dab83bafafd952112d6974672affb187546478bacc8b8336" Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.149693 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a151752f2a945e81dab83bafafd952112d6974672affb187546478bacc8b8336"} err="failed to get container status \"a151752f2a945e81dab83bafafd952112d6974672affb187546478bacc8b8336\": rpc error: code = NotFound desc = could not find container \"a151752f2a945e81dab83bafafd952112d6974672affb187546478bacc8b8336\": container with ID starting with a151752f2a945e81dab83bafafd952112d6974672affb187546478bacc8b8336 not found: ID does not exist" Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.347479 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vt2vx"] Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.364676 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vt2vx"] Nov 21 17:00:08 crc kubenswrapper[4967]: I1121 17:00:08.556567 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="619d1079-5810-438a-b50b-b18710cef9ab" path="/var/lib/kubelet/pods/619d1079-5810-438a-b50b-b18710cef9ab/volumes" Nov 21 17:00:13 crc kubenswrapper[4967]: I1121 17:00:13.778482 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6r8jk" Nov 21 17:00:13 crc kubenswrapper[4967]: I1121 17:00:13.845258 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6r8jk" Nov 21 17:00:14 crc kubenswrapper[4967]: I1121 17:00:14.020147 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6r8jk"] Nov 21 17:00:14 crc kubenswrapper[4967]: I1121 17:00:14.060221 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-j7bp7" Nov 21 17:00:14 crc kubenswrapper[4967]: I1121 17:00:14.113837 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-j7bp7" Nov 21 17:00:15 crc kubenswrapper[4967]: I1121 17:00:15.085380 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-6r8jk" podUID="83c2635f-d413-430d-8a98-1c833f46fc24" containerName="registry-server" containerID="cri-o://47ddb0e4a4c50add8c526818049248f55c5a864bfe2be544933a3c80fa8eb4c2" gracePeriod=2 Nov 21 17:00:15 crc kubenswrapper[4967]: I1121 17:00:15.657368 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6r8jk" Nov 21 17:00:15 crc kubenswrapper[4967]: I1121 17:00:15.733916 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ppznk\" (UniqueName: \"kubernetes.io/projected/83c2635f-d413-430d-8a98-1c833f46fc24-kube-api-access-ppznk\") pod \"83c2635f-d413-430d-8a98-1c833f46fc24\" (UID: \"83c2635f-d413-430d-8a98-1c833f46fc24\") " Nov 21 17:00:15 crc kubenswrapper[4967]: I1121 17:00:15.734180 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83c2635f-d413-430d-8a98-1c833f46fc24-catalog-content\") pod \"83c2635f-d413-430d-8a98-1c833f46fc24\" (UID: \"83c2635f-d413-430d-8a98-1c833f46fc24\") " Nov 21 17:00:15 crc kubenswrapper[4967]: I1121 17:00:15.734342 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83c2635f-d413-430d-8a98-1c833f46fc24-utilities\") pod \"83c2635f-d413-430d-8a98-1c833f46fc24\" (UID: \"83c2635f-d413-430d-8a98-1c833f46fc24\") " Nov 21 17:00:15 crc kubenswrapper[4967]: I1121 17:00:15.734729 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83c2635f-d413-430d-8a98-1c833f46fc24-utilities" (OuterVolumeSpecName: "utilities") pod "83c2635f-d413-430d-8a98-1c833f46fc24" (UID: "83c2635f-d413-430d-8a98-1c833f46fc24"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:00:15 crc kubenswrapper[4967]: I1121 17:00:15.735395 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/83c2635f-d413-430d-8a98-1c833f46fc24-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 17:00:15 crc kubenswrapper[4967]: I1121 17:00:15.741384 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83c2635f-d413-430d-8a98-1c833f46fc24-kube-api-access-ppznk" (OuterVolumeSpecName: "kube-api-access-ppznk") pod "83c2635f-d413-430d-8a98-1c833f46fc24" (UID: "83c2635f-d413-430d-8a98-1c833f46fc24"). InnerVolumeSpecName "kube-api-access-ppznk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:00:15 crc kubenswrapper[4967]: I1121 17:00:15.791881 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/83c2635f-d413-430d-8a98-1c833f46fc24-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "83c2635f-d413-430d-8a98-1c833f46fc24" (UID: "83c2635f-d413-430d-8a98-1c833f46fc24"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:00:15 crc kubenswrapper[4967]: I1121 17:00:15.837192 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ppznk\" (UniqueName: \"kubernetes.io/projected/83c2635f-d413-430d-8a98-1c833f46fc24-kube-api-access-ppznk\") on node \"crc\" DevicePath \"\"" Nov 21 17:00:15 crc kubenswrapper[4967]: I1121 17:00:15.837224 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/83c2635f-d413-430d-8a98-1c833f46fc24-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.097554 4967 generic.go:334] "Generic (PLEG): container finished" podID="83c2635f-d413-430d-8a98-1c833f46fc24" containerID="47ddb0e4a4c50add8c526818049248f55c5a864bfe2be544933a3c80fa8eb4c2" exitCode=0 Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.097606 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6r8jk" event={"ID":"83c2635f-d413-430d-8a98-1c833f46fc24","Type":"ContainerDied","Data":"47ddb0e4a4c50add8c526818049248f55c5a864bfe2be544933a3c80fa8eb4c2"} Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.097633 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6r8jk" Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.097921 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6r8jk" event={"ID":"83c2635f-d413-430d-8a98-1c833f46fc24","Type":"ContainerDied","Data":"b86520da6b4e99c7914e6766bea12020f6e234191823750a9bb211c5574ba42f"} Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.097946 4967 scope.go:117] "RemoveContainer" containerID="47ddb0e4a4c50add8c526818049248f55c5a864bfe2be544933a3c80fa8eb4c2" Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.122081 4967 scope.go:117] "RemoveContainer" containerID="fc7e55929a1549e38e59a5a7c474c6b284960560683ac5a2ac6ce6f3b6577261" Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.134991 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6r8jk"] Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.149261 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6r8jk"] Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.167546 4967 scope.go:117] "RemoveContainer" containerID="3ae33f1abf7e5ce67d8cd1fa76e01b52f8ca24e83e3fe4610a1dc4fdd3aff6b9" Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.212252 4967 scope.go:117] "RemoveContainer" containerID="47ddb0e4a4c50add8c526818049248f55c5a864bfe2be544933a3c80fa8eb4c2" Nov 21 17:00:16 crc kubenswrapper[4967]: E1121 17:00:16.212904 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47ddb0e4a4c50add8c526818049248f55c5a864bfe2be544933a3c80fa8eb4c2\": container with ID starting with 47ddb0e4a4c50add8c526818049248f55c5a864bfe2be544933a3c80fa8eb4c2 not found: ID does not exist" containerID="47ddb0e4a4c50add8c526818049248f55c5a864bfe2be544933a3c80fa8eb4c2" Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.212960 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47ddb0e4a4c50add8c526818049248f55c5a864bfe2be544933a3c80fa8eb4c2"} err="failed to get container status 
\"47ddb0e4a4c50add8c526818049248f55c5a864bfe2be544933a3c80fa8eb4c2\": rpc error: code = NotFound desc = could not find container \"47ddb0e4a4c50add8c526818049248f55c5a864bfe2be544933a3c80fa8eb4c2\": container with ID starting with 47ddb0e4a4c50add8c526818049248f55c5a864bfe2be544933a3c80fa8eb4c2 not found: ID does not exist" Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.212992 4967 scope.go:117] "RemoveContainer" containerID="fc7e55929a1549e38e59a5a7c474c6b284960560683ac5a2ac6ce6f3b6577261" Nov 21 17:00:16 crc kubenswrapper[4967]: E1121 17:00:16.213432 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc7e55929a1549e38e59a5a7c474c6b284960560683ac5a2ac6ce6f3b6577261\": container with ID starting with fc7e55929a1549e38e59a5a7c474c6b284960560683ac5a2ac6ce6f3b6577261 not found: ID does not exist" containerID="fc7e55929a1549e38e59a5a7c474c6b284960560683ac5a2ac6ce6f3b6577261" Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.213475 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc7e55929a1549e38e59a5a7c474c6b284960560683ac5a2ac6ce6f3b6577261"} err="failed to get container status \"fc7e55929a1549e38e59a5a7c474c6b284960560683ac5a2ac6ce6f3b6577261\": rpc error: code = NotFound desc = could not find container \"fc7e55929a1549e38e59a5a7c474c6b284960560683ac5a2ac6ce6f3b6577261\": container with ID starting with fc7e55929a1549e38e59a5a7c474c6b284960560683ac5a2ac6ce6f3b6577261 not found: ID does not exist" Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.213507 4967 scope.go:117] "RemoveContainer" containerID="3ae33f1abf7e5ce67d8cd1fa76e01b52f8ca24e83e3fe4610a1dc4fdd3aff6b9" Nov 21 17:00:16 crc kubenswrapper[4967]: E1121 17:00:16.214011 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ae33f1abf7e5ce67d8cd1fa76e01b52f8ca24e83e3fe4610a1dc4fdd3aff6b9\": container with ID starting with 3ae33f1abf7e5ce67d8cd1fa76e01b52f8ca24e83e3fe4610a1dc4fdd3aff6b9 not found: ID does not exist" containerID="3ae33f1abf7e5ce67d8cd1fa76e01b52f8ca24e83e3fe4610a1dc4fdd3aff6b9" Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.214038 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ae33f1abf7e5ce67d8cd1fa76e01b52f8ca24e83e3fe4610a1dc4fdd3aff6b9"} err="failed to get container status \"3ae33f1abf7e5ce67d8cd1fa76e01b52f8ca24e83e3fe4610a1dc4fdd3aff6b9\": rpc error: code = NotFound desc = could not find container \"3ae33f1abf7e5ce67d8cd1fa76e01b52f8ca24e83e3fe4610a1dc4fdd3aff6b9\": container with ID starting with 3ae33f1abf7e5ce67d8cd1fa76e01b52f8ca24e83e3fe4610a1dc4fdd3aff6b9 not found: ID does not exist" Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.418190 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-j7bp7"] Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.418468 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-j7bp7" podUID="413bb8cb-da4c-4e01-9e46-938e4f07a256" containerName="registry-server" containerID="cri-o://d6fe2d44b6b82b36c0a88dcefe786c6ad0ebe453afd96b37a5e5dad9bf01b20b" gracePeriod=2 Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.522059 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: 
Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.522128 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.522188 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2"
Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.523166 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.523230 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287" gracePeriod=600
Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.553811 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83c2635f-d413-430d-8a98-1c833f46fc24" path="/var/lib/kubelet/pods/83c2635f-d413-430d-8a98-1c833f46fc24/volumes"
Nov 21 17:00:16 crc kubenswrapper[4967]: E1121 17:00:16.660767 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:00:16 crc kubenswrapper[4967]: I1121 17:00:16.980930 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j7bp7"
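Note: after the failed liveness probe the kubelet kills machine-config-daemon (gracePeriod=600) and then rate-limits its restart: the CrashLoopBackOff error with "back-off 5m0s" means the exponential restart delay has reached its cap. A sketch of that backoff shape; the 10s base and doubling are assumptions drawn from kubelet defaults, and only the 5m cap is visible in the log itself:

package main

import (
	"fmt"
	"time"
)

// restartDelay models an exponential restart backoff: double the wait
// after each failed restart, capped at five minutes.
func restartDelay(restarts int) time.Duration {
	d := 10 * time.Second
	for i := 0; i < restarts; i++ {
		d *= 2
		if d > 5*time.Minute {
			return 5 * time.Minute
		}
	}
	return d
}

func main() {
	for r := 0; r <= 6; r++ {
		fmt.Printf("restart %d -> wait %v\n", r, restartDelay(r))
	}
}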
Need to start a new one" pod="openshift-marketplace/community-operators-j7bp7" Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.067405 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/413bb8cb-da4c-4e01-9e46-938e4f07a256-utilities\") pod \"413bb8cb-da4c-4e01-9e46-938e4f07a256\" (UID: \"413bb8cb-da4c-4e01-9e46-938e4f07a256\") " Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.067575 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8zd2l\" (UniqueName: \"kubernetes.io/projected/413bb8cb-da4c-4e01-9e46-938e4f07a256-kube-api-access-8zd2l\") pod \"413bb8cb-da4c-4e01-9e46-938e4f07a256\" (UID: \"413bb8cb-da4c-4e01-9e46-938e4f07a256\") " Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.067643 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/413bb8cb-da4c-4e01-9e46-938e4f07a256-catalog-content\") pod \"413bb8cb-da4c-4e01-9e46-938e4f07a256\" (UID: \"413bb8cb-da4c-4e01-9e46-938e4f07a256\") " Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.068119 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/413bb8cb-da4c-4e01-9e46-938e4f07a256-utilities" (OuterVolumeSpecName: "utilities") pod "413bb8cb-da4c-4e01-9e46-938e4f07a256" (UID: "413bb8cb-da4c-4e01-9e46-938e4f07a256"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.068383 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/413bb8cb-da4c-4e01-9e46-938e4f07a256-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.074004 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/413bb8cb-da4c-4e01-9e46-938e4f07a256-kube-api-access-8zd2l" (OuterVolumeSpecName: "kube-api-access-8zd2l") pod "413bb8cb-da4c-4e01-9e46-938e4f07a256" (UID: "413bb8cb-da4c-4e01-9e46-938e4f07a256"). InnerVolumeSpecName "kube-api-access-8zd2l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.110951 4967 generic.go:334] "Generic (PLEG): container finished" podID="413bb8cb-da4c-4e01-9e46-938e4f07a256" containerID="d6fe2d44b6b82b36c0a88dcefe786c6ad0ebe453afd96b37a5e5dad9bf01b20b" exitCode=0 Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.111062 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j7bp7" event={"ID":"413bb8cb-da4c-4e01-9e46-938e4f07a256","Type":"ContainerDied","Data":"d6fe2d44b6b82b36c0a88dcefe786c6ad0ebe453afd96b37a5e5dad9bf01b20b"} Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.111104 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j7bp7" event={"ID":"413bb8cb-da4c-4e01-9e46-938e4f07a256","Type":"ContainerDied","Data":"88e018c8e07ddf29f6a161b3f4ad98f9551a29912d5e1db8a347b93bf191be59"} Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.111116 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-j7bp7" Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.111129 4967 scope.go:117] "RemoveContainer" containerID="d6fe2d44b6b82b36c0a88dcefe786c6ad0ebe453afd96b37a5e5dad9bf01b20b" Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.115381 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/413bb8cb-da4c-4e01-9e46-938e4f07a256-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "413bb8cb-da4c-4e01-9e46-938e4f07a256" (UID: "413bb8cb-da4c-4e01-9e46-938e4f07a256"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.115898 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287" exitCode=0 Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.115982 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"} Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.117097 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287" Nov 21 17:00:17 crc kubenswrapper[4967]: E1121 17:00:17.117613 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.149649 4967 scope.go:117] "RemoveContainer" containerID="1383d0f87e93711dcda04efd0ab9b35b602449e43904b0fbc1f541f1b241ff3e" Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.170664 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8zd2l\" (UniqueName: \"kubernetes.io/projected/413bb8cb-da4c-4e01-9e46-938e4f07a256-kube-api-access-8zd2l\") on node \"crc\" DevicePath \"\"" Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.170702 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/413bb8cb-da4c-4e01-9e46-938e4f07a256-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.187359 4967 scope.go:117] "RemoveContainer" containerID="4b971fbc64144bdc361f07a303b905a6233c8bff5ed92a3b48c71aa7ce1cd1b0" Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.271793 4967 scope.go:117] "RemoveContainer" containerID="d6fe2d44b6b82b36c0a88dcefe786c6ad0ebe453afd96b37a5e5dad9bf01b20b" Nov 21 17:00:17 crc kubenswrapper[4967]: E1121 17:00:17.272302 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6fe2d44b6b82b36c0a88dcefe786c6ad0ebe453afd96b37a5e5dad9bf01b20b\": container with ID starting with d6fe2d44b6b82b36c0a88dcefe786c6ad0ebe453afd96b37a5e5dad9bf01b20b not found: ID does not exist" containerID="d6fe2d44b6b82b36c0a88dcefe786c6ad0ebe453afd96b37a5e5dad9bf01b20b" Nov 21 17:00:17 crc 
kubenswrapper[4967]: I1121 17:00:17.272436 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6fe2d44b6b82b36c0a88dcefe786c6ad0ebe453afd96b37a5e5dad9bf01b20b"} err="failed to get container status \"d6fe2d44b6b82b36c0a88dcefe786c6ad0ebe453afd96b37a5e5dad9bf01b20b\": rpc error: code = NotFound desc = could not find container \"d6fe2d44b6b82b36c0a88dcefe786c6ad0ebe453afd96b37a5e5dad9bf01b20b\": container with ID starting with d6fe2d44b6b82b36c0a88dcefe786c6ad0ebe453afd96b37a5e5dad9bf01b20b not found: ID does not exist"
Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.272465 4967 scope.go:117] "RemoveContainer" containerID="1383d0f87e93711dcda04efd0ab9b35b602449e43904b0fbc1f541f1b241ff3e"
Nov 21 17:00:17 crc kubenswrapper[4967]: E1121 17:00:17.273016 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1383d0f87e93711dcda04efd0ab9b35b602449e43904b0fbc1f541f1b241ff3e\": container with ID starting with 1383d0f87e93711dcda04efd0ab9b35b602449e43904b0fbc1f541f1b241ff3e not found: ID does not exist" containerID="1383d0f87e93711dcda04efd0ab9b35b602449e43904b0fbc1f541f1b241ff3e"
Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.273039 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1383d0f87e93711dcda04efd0ab9b35b602449e43904b0fbc1f541f1b241ff3e"} err="failed to get container status \"1383d0f87e93711dcda04efd0ab9b35b602449e43904b0fbc1f541f1b241ff3e\": rpc error: code = NotFound desc = could not find container \"1383d0f87e93711dcda04efd0ab9b35b602449e43904b0fbc1f541f1b241ff3e\": container with ID starting with 1383d0f87e93711dcda04efd0ab9b35b602449e43904b0fbc1f541f1b241ff3e not found: ID does not exist"
Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.273055 4967 scope.go:117] "RemoveContainer" containerID="4b971fbc64144bdc361f07a303b905a6233c8bff5ed92a3b48c71aa7ce1cd1b0"
Nov 21 17:00:17 crc kubenswrapper[4967]: E1121 17:00:17.273475 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b971fbc64144bdc361f07a303b905a6233c8bff5ed92a3b48c71aa7ce1cd1b0\": container with ID starting with 4b971fbc64144bdc361f07a303b905a6233c8bff5ed92a3b48c71aa7ce1cd1b0 not found: ID does not exist" containerID="4b971fbc64144bdc361f07a303b905a6233c8bff5ed92a3b48c71aa7ce1cd1b0"
Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.273506 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b971fbc64144bdc361f07a303b905a6233c8bff5ed92a3b48c71aa7ce1cd1b0"} err="failed to get container status \"4b971fbc64144bdc361f07a303b905a6233c8bff5ed92a3b48c71aa7ce1cd1b0\": rpc error: code = NotFound desc = could not find container \"4b971fbc64144bdc361f07a303b905a6233c8bff5ed92a3b48c71aa7ce1cd1b0\": container with ID starting with 4b971fbc64144bdc361f07a303b905a6233c8bff5ed92a3b48c71aa7ce1cd1b0 not found: ID does not exist"
Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.273526 4967 scope.go:117] "RemoveContainer" containerID="49b5a2c57d91b8c638bc293cbe4b1ce02b0eb0de7ec6b9d6607f6ec99935765c"
Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.383534 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5rhlb" podUID="2e1934ba-08f4-40a8-a038-59c57930e10f" containerName="registry-server" probeResult="failure" output=<
Nov 21 17:00:17 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s
Nov 21 17:00:17 crc kubenswrapper[4967]: >
Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.452202 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-j7bp7"]
Nov 21 17:00:17 crc kubenswrapper[4967]: I1121 17:00:17.462384 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-j7bp7"]
Nov 21 17:00:18 crc kubenswrapper[4967]: I1121 17:00:18.552217 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="413bb8cb-da4c-4e01-9e46-938e4f07a256" path="/var/lib/kubelet/pods/413bb8cb-da4c-4e01-9e46-938e4f07a256/volumes"
Nov 21 17:00:24 crc kubenswrapper[4967]: I1121 17:00:24.878816 4967 scope.go:117] "RemoveContainer" containerID="7af3ba61af5de3b05d3c35f71d319784aa585c1b40396d014ca3569c6ff09949"
Nov 21 17:00:27 crc kubenswrapper[4967]: I1121 17:00:27.401025 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5rhlb" podUID="2e1934ba-08f4-40a8-a038-59c57930e10f" containerName="registry-server" probeResult="failure" output=<
Nov 21 17:00:27 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s
Nov 21 17:00:27 crc kubenswrapper[4967]: >
Nov 21 17:00:27 crc kubenswrapper[4967]: I1121 17:00:27.537374 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:00:27 crc kubenswrapper[4967]: E1121 17:00:27.537772 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:00:36 crc kubenswrapper[4967]: I1121 17:00:36.402685 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5rhlb"
Nov 21 17:00:36 crc kubenswrapper[4967]: I1121 17:00:36.463866 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5rhlb"
Nov 21 17:00:36 crc kubenswrapper[4967]: I1121 17:00:36.647856 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5rhlb"]
Nov 21 17:00:38 crc kubenswrapper[4967]: I1121 17:00:38.362204 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5rhlb" podUID="2e1934ba-08f4-40a8-a038-59c57930e10f" containerName="registry-server" containerID="cri-o://f1a39977e17823197a9693b7e8eda6bd231d48a4e2e04246cfc58406bf746c79" gracePeriod=2
Nov 21 17:00:38 crc kubenswrapper[4967]: I1121 17:00:38.928947 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5rhlb"
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.057541 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e1934ba-08f4-40a8-a038-59c57930e10f-utilities\") pod \"2e1934ba-08f4-40a8-a038-59c57930e10f\" (UID: \"2e1934ba-08f4-40a8-a038-59c57930e10f\") "
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.057593 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e1934ba-08f4-40a8-a038-59c57930e10f-catalog-content\") pod \"2e1934ba-08f4-40a8-a038-59c57930e10f\" (UID: \"2e1934ba-08f4-40a8-a038-59c57930e10f\") "
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.057656 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fql9r\" (UniqueName: \"kubernetes.io/projected/2e1934ba-08f4-40a8-a038-59c57930e10f-kube-api-access-fql9r\") pod \"2e1934ba-08f4-40a8-a038-59c57930e10f\" (UID: \"2e1934ba-08f4-40a8-a038-59c57930e10f\") "
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.058559 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e1934ba-08f4-40a8-a038-59c57930e10f-utilities" (OuterVolumeSpecName: "utilities") pod "2e1934ba-08f4-40a8-a038-59c57930e10f" (UID: "2e1934ba-08f4-40a8-a038-59c57930e10f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.058724 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e1934ba-08f4-40a8-a038-59c57930e10f-utilities\") on node \"crc\" DevicePath \"\""
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.063606 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e1934ba-08f4-40a8-a038-59c57930e10f-kube-api-access-fql9r" (OuterVolumeSpecName: "kube-api-access-fql9r") pod "2e1934ba-08f4-40a8-a038-59c57930e10f" (UID: "2e1934ba-08f4-40a8-a038-59c57930e10f"). InnerVolumeSpecName "kube-api-access-fql9r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.152252 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e1934ba-08f4-40a8-a038-59c57930e10f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2e1934ba-08f4-40a8-a038-59c57930e10f" (UID: "2e1934ba-08f4-40a8-a038-59c57930e10f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.161882 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e1934ba-08f4-40a8-a038-59c57930e10f-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.161926 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fql9r\" (UniqueName: \"kubernetes.io/projected/2e1934ba-08f4-40a8-a038-59c57930e10f-kube-api-access-fql9r\") on node \"crc\" DevicePath \"\""
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.380230 4967 generic.go:334] "Generic (PLEG): container finished" podID="2e1934ba-08f4-40a8-a038-59c57930e10f" containerID="f1a39977e17823197a9693b7e8eda6bd231d48a4e2e04246cfc58406bf746c79" exitCode=0
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.380282 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rhlb" event={"ID":"2e1934ba-08f4-40a8-a038-59c57930e10f","Type":"ContainerDied","Data":"f1a39977e17823197a9693b7e8eda6bd231d48a4e2e04246cfc58406bf746c79"}
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.380352 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5rhlb" event={"ID":"2e1934ba-08f4-40a8-a038-59c57930e10f","Type":"ContainerDied","Data":"2de7d4ee5ab3c7395be6fc682e1d3254df8d8b63d14f349b5a855b403c6e2c76"}
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.380379 4967 scope.go:117] "RemoveContainer" containerID="f1a39977e17823197a9693b7e8eda6bd231d48a4e2e04246cfc58406bf746c79"
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.380568 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5rhlb"
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.416927 4967 scope.go:117] "RemoveContainer" containerID="6c2f5370108495eb127fecfc0c7a2fd995be246cf300075c6aac9fc68e06ac98"
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.416934 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5rhlb"]
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.428475 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5rhlb"]
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.439596 4967 scope.go:117] "RemoveContainer" containerID="51bbf5da60ab0278b159ff1523b72d3720c972de3a5331b2cb4bfa8362b47606"
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.501048 4967 scope.go:117] "RemoveContainer" containerID="f1a39977e17823197a9693b7e8eda6bd231d48a4e2e04246cfc58406bf746c79"
Nov 21 17:00:39 crc kubenswrapper[4967]: E1121 17:00:39.501937 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1a39977e17823197a9693b7e8eda6bd231d48a4e2e04246cfc58406bf746c79\": container with ID starting with f1a39977e17823197a9693b7e8eda6bd231d48a4e2e04246cfc58406bf746c79 not found: ID does not exist" containerID="f1a39977e17823197a9693b7e8eda6bd231d48a4e2e04246cfc58406bf746c79"
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.501982 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1a39977e17823197a9693b7e8eda6bd231d48a4e2e04246cfc58406bf746c79"} err="failed to get container status \"f1a39977e17823197a9693b7e8eda6bd231d48a4e2e04246cfc58406bf746c79\": rpc error: code = NotFound desc = could not find container \"f1a39977e17823197a9693b7e8eda6bd231d48a4e2e04246cfc58406bf746c79\": container with ID starting with f1a39977e17823197a9693b7e8eda6bd231d48a4e2e04246cfc58406bf746c79 not found: ID does not exist"
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.502023 4967 scope.go:117] "RemoveContainer" containerID="6c2f5370108495eb127fecfc0c7a2fd995be246cf300075c6aac9fc68e06ac98"
Nov 21 17:00:39 crc kubenswrapper[4967]: E1121 17:00:39.502666 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6c2f5370108495eb127fecfc0c7a2fd995be246cf300075c6aac9fc68e06ac98\": container with ID starting with 6c2f5370108495eb127fecfc0c7a2fd995be246cf300075c6aac9fc68e06ac98 not found: ID does not exist" containerID="6c2f5370108495eb127fecfc0c7a2fd995be246cf300075c6aac9fc68e06ac98"
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.502703 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6c2f5370108495eb127fecfc0c7a2fd995be246cf300075c6aac9fc68e06ac98"} err="failed to get container status \"6c2f5370108495eb127fecfc0c7a2fd995be246cf300075c6aac9fc68e06ac98\": rpc error: code = NotFound desc = could not find container \"6c2f5370108495eb127fecfc0c7a2fd995be246cf300075c6aac9fc68e06ac98\": container with ID starting with 6c2f5370108495eb127fecfc0c7a2fd995be246cf300075c6aac9fc68e06ac98 not found: ID does not exist"
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.502731 4967 scope.go:117] "RemoveContainer" containerID="51bbf5da60ab0278b159ff1523b72d3720c972de3a5331b2cb4bfa8362b47606"
Nov 21 17:00:39 crc kubenswrapper[4967]: E1121 17:00:39.504804 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51bbf5da60ab0278b159ff1523b72d3720c972de3a5331b2cb4bfa8362b47606\": container with ID starting with 51bbf5da60ab0278b159ff1523b72d3720c972de3a5331b2cb4bfa8362b47606 not found: ID does not exist" containerID="51bbf5da60ab0278b159ff1523b72d3720c972de3a5331b2cb4bfa8362b47606"
Nov 21 17:00:39 crc kubenswrapper[4967]: I1121 17:00:39.504857 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51bbf5da60ab0278b159ff1523b72d3720c972de3a5331b2cb4bfa8362b47606"} err="failed to get container status \"51bbf5da60ab0278b159ff1523b72d3720c972de3a5331b2cb4bfa8362b47606\": rpc error: code = NotFound desc = could not find container \"51bbf5da60ab0278b159ff1523b72d3720c972de3a5331b2cb4bfa8362b47606\": container with ID starting with 51bbf5da60ab0278b159ff1523b72d3720c972de3a5331b2cb4bfa8362b47606 not found: ID does not exist"
Nov 21 17:00:40 crc kubenswrapper[4967]: I1121 17:00:40.540623 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:00:40 crc kubenswrapper[4967]: E1121 17:00:40.541007 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:00:40 crc kubenswrapper[4967]: I1121 17:00:40.549734 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e1934ba-08f4-40a8-a038-59c57930e10f" path="/var/lib/kubelet/pods/2e1934ba-08f4-40a8-a038-59c57930e10f/volumes"
Nov 21 17:00:52 crc kubenswrapper[4967]: I1121 17:00:52.555726 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:00:52 crc kubenswrapper[4967]: E1121 17:00:52.556897 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.155575 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29395741-7nk8d"]
Nov 21 17:01:00 crc kubenswrapper[4967]: E1121 17:01:00.156732 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="619d1079-5810-438a-b50b-b18710cef9ab" containerName="extract-utilities"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.156748 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="619d1079-5810-438a-b50b-b18710cef9ab" containerName="extract-utilities"
Nov 21 17:01:00 crc kubenswrapper[4967]: E1121 17:01:00.156768 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83c2635f-d413-430d-8a98-1c833f46fc24" containerName="registry-server"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.156777 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="83c2635f-d413-430d-8a98-1c833f46fc24" containerName="registry-server"
Nov 21 17:01:00 crc kubenswrapper[4967]: E1121 17:01:00.156800 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="413bb8cb-da4c-4e01-9e46-938e4f07a256" containerName="extract-utilities"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.156808 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="413bb8cb-da4c-4e01-9e46-938e4f07a256" containerName="extract-utilities"
Nov 21 17:01:00 crc kubenswrapper[4967]: E1121 17:01:00.156828 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e1934ba-08f4-40a8-a038-59c57930e10f" containerName="extract-utilities"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.156835 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e1934ba-08f4-40a8-a038-59c57930e10f" containerName="extract-utilities"
Nov 21 17:01:00 crc kubenswrapper[4967]: E1121 17:01:00.156850 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="413bb8cb-da4c-4e01-9e46-938e4f07a256" containerName="registry-server"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.156855 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="413bb8cb-da4c-4e01-9e46-938e4f07a256" containerName="registry-server"
Nov 21 17:01:00 crc kubenswrapper[4967]: E1121 17:01:00.156869 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e1934ba-08f4-40a8-a038-59c57930e10f" containerName="extract-content"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.156876 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e1934ba-08f4-40a8-a038-59c57930e10f" containerName="extract-content"
Nov 21 17:01:00 crc kubenswrapper[4967]: E1121 17:01:00.156903 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f288f452-99e9-4291-8f01-737ffb7610bd" containerName="collect-profiles"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.156912 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="f288f452-99e9-4291-8f01-737ffb7610bd" containerName="collect-profiles"
Nov 21 17:01:00 crc kubenswrapper[4967]: E1121 17:01:00.156934 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="413bb8cb-da4c-4e01-9e46-938e4f07a256" containerName="extract-content"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.156944 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="413bb8cb-da4c-4e01-9e46-938e4f07a256" containerName="extract-content"
Nov 21 17:01:00 crc kubenswrapper[4967]: E1121 17:01:00.156961 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83c2635f-d413-430d-8a98-1c833f46fc24" containerName="extract-content"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.156967 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="83c2635f-d413-430d-8a98-1c833f46fc24" containerName="extract-content"
Nov 21 17:01:00 crc kubenswrapper[4967]: E1121 17:01:00.156989 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="619d1079-5810-438a-b50b-b18710cef9ab" containerName="registry-server"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.156995 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="619d1079-5810-438a-b50b-b18710cef9ab" containerName="registry-server"
Nov 21 17:01:00 crc kubenswrapper[4967]: E1121 17:01:00.157011 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="619d1079-5810-438a-b50b-b18710cef9ab" containerName="extract-content"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.157017 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="619d1079-5810-438a-b50b-b18710cef9ab" containerName="extract-content"
Nov 21 17:01:00 crc kubenswrapper[4967]: E1121 17:01:00.157025 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83c2635f-d413-430d-8a98-1c833f46fc24" containerName="extract-utilities"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.157030 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="83c2635f-d413-430d-8a98-1c833f46fc24" containerName="extract-utilities"
Nov 21 17:01:00 crc kubenswrapper[4967]: E1121 17:01:00.157038 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e1934ba-08f4-40a8-a038-59c57930e10f" containerName="registry-server"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.157043 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e1934ba-08f4-40a8-a038-59c57930e10f" containerName="registry-server"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.157245 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="83c2635f-d413-430d-8a98-1c833f46fc24" containerName="registry-server"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.157263 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="413bb8cb-da4c-4e01-9e46-938e4f07a256" containerName="registry-server"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.157276 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e1934ba-08f4-40a8-a038-59c57930e10f" containerName="registry-server"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.157335 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="619d1079-5810-438a-b50b-b18710cef9ab" containerName="registry-server"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.157348 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="f288f452-99e9-4291-8f01-737ffb7610bd" containerName="collect-profiles"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.230257 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29395741-7nk8d"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.259290 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29395741-7nk8d"]
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.291748 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c2ceb6-473f-408c-8e7b-796f7f655f68-combined-ca-bundle\") pod \"keystone-cron-29395741-7nk8d\" (UID: \"97c2ceb6-473f-408c-8e7b-796f7f655f68\") " pod="openstack/keystone-cron-29395741-7nk8d"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.291881 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/97c2ceb6-473f-408c-8e7b-796f7f655f68-fernet-keys\") pod \"keystone-cron-29395741-7nk8d\" (UID: \"97c2ceb6-473f-408c-8e7b-796f7f655f68\") " pod="openstack/keystone-cron-29395741-7nk8d"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.292019 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgmdb\" (UniqueName: \"kubernetes.io/projected/97c2ceb6-473f-408c-8e7b-796f7f655f68-kube-api-access-zgmdb\") pod \"keystone-cron-29395741-7nk8d\" (UID: \"97c2ceb6-473f-408c-8e7b-796f7f655f68\") " pod="openstack/keystone-cron-29395741-7nk8d"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.292100 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97c2ceb6-473f-408c-8e7b-796f7f655f68-config-data\") pod \"keystone-cron-29395741-7nk8d\" (UID: \"97c2ceb6-473f-408c-8e7b-796f7f655f68\") " pod="openstack/keystone-cron-29395741-7nk8d"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.395129 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97c2ceb6-473f-408c-8e7b-796f7f655f68-config-data\") pod \"keystone-cron-29395741-7nk8d\" (UID: \"97c2ceb6-473f-408c-8e7b-796f7f655f68\") " pod="openstack/keystone-cron-29395741-7nk8d"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.395789 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c2ceb6-473f-408c-8e7b-796f7f655f68-combined-ca-bundle\") pod \"keystone-cron-29395741-7nk8d\" (UID: \"97c2ceb6-473f-408c-8e7b-796f7f655f68\") " pod="openstack/keystone-cron-29395741-7nk8d"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.395905 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/97c2ceb6-473f-408c-8e7b-796f7f655f68-fernet-keys\") pod \"keystone-cron-29395741-7nk8d\" (UID: \"97c2ceb6-473f-408c-8e7b-796f7f655f68\") " pod="openstack/keystone-cron-29395741-7nk8d"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.396079 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgmdb\" (UniqueName: \"kubernetes.io/projected/97c2ceb6-473f-408c-8e7b-796f7f655f68-kube-api-access-zgmdb\") pod \"keystone-cron-29395741-7nk8d\" (UID: \"97c2ceb6-473f-408c-8e7b-796f7f655f68\") " pod="openstack/keystone-cron-29395741-7nk8d"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.711656 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c2ceb6-473f-408c-8e7b-796f7f655f68-combined-ca-bundle\") pod \"keystone-cron-29395741-7nk8d\" (UID: \"97c2ceb6-473f-408c-8e7b-796f7f655f68\") " pod="openstack/keystone-cron-29395741-7nk8d"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.711716 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/97c2ceb6-473f-408c-8e7b-796f7f655f68-fernet-keys\") pod \"keystone-cron-29395741-7nk8d\" (UID: \"97c2ceb6-473f-408c-8e7b-796f7f655f68\") " pod="openstack/keystone-cron-29395741-7nk8d"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.712298 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgmdb\" (UniqueName: \"kubernetes.io/projected/97c2ceb6-473f-408c-8e7b-796f7f655f68-kube-api-access-zgmdb\") pod \"keystone-cron-29395741-7nk8d\" (UID: \"97c2ceb6-473f-408c-8e7b-796f7f655f68\") " pod="openstack/keystone-cron-29395741-7nk8d"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.712760 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97c2ceb6-473f-408c-8e7b-796f7f655f68-config-data\") pod \"keystone-cron-29395741-7nk8d\" (UID: \"97c2ceb6-473f-408c-8e7b-796f7f655f68\") " pod="openstack/keystone-cron-29395741-7nk8d"
Nov 21 17:01:00 crc kubenswrapper[4967]: I1121 17:01:00.857155 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29395741-7nk8d"
Nov 21 17:01:01 crc kubenswrapper[4967]: I1121 17:01:01.319385 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29395741-7nk8d"]
Nov 21 17:01:01 crc kubenswrapper[4967]: I1121 17:01:01.628257 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395741-7nk8d" event={"ID":"97c2ceb6-473f-408c-8e7b-796f7f655f68","Type":"ContainerStarted","Data":"3320c602633caeb9741328d742b557ddaf0ab6897d971fa3bf01489752a3ceb8"}
Nov 21 17:01:01 crc kubenswrapper[4967]: I1121 17:01:01.630609 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395741-7nk8d" event={"ID":"97c2ceb6-473f-408c-8e7b-796f7f655f68","Type":"ContainerStarted","Data":"d699a4174b49074a400205b3c42e7c3e16acdb98dd5d16733cabfaa1209a217c"}
Nov 21 17:01:01 crc kubenswrapper[4967]: I1121 17:01:01.652253 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29395741-7nk8d" podStartSLOduration=1.65223438 podStartE2EDuration="1.65223438s" podCreationTimestamp="2025-11-21 17:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 17:01:01.642760638 +0000 UTC m=+5149.901281666" watchObservedRunningTime="2025-11-21 17:01:01.65223438 +0000 UTC m=+5149.910755388"
Nov 21 17:01:04 crc kubenswrapper[4967]: I1121 17:01:04.537727 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:01:04 crc kubenswrapper[4967]: E1121 17:01:04.538640 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:01:04 crc kubenswrapper[4967]: I1121 17:01:04.660740 4967 generic.go:334] "Generic (PLEG): container finished" podID="97c2ceb6-473f-408c-8e7b-796f7f655f68" containerID="3320c602633caeb9741328d742b557ddaf0ab6897d971fa3bf01489752a3ceb8" exitCode=0
Nov 21 17:01:04 crc kubenswrapper[4967]: I1121 17:01:04.660786 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395741-7nk8d" event={"ID":"97c2ceb6-473f-408c-8e7b-796f7f655f68","Type":"ContainerDied","Data":"3320c602633caeb9741328d742b557ddaf0ab6897d971fa3bf01489752a3ceb8"}
Nov 21 17:01:06 crc kubenswrapper[4967]: I1121 17:01:06.138977 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29395741-7nk8d"
Nov 21 17:01:06 crc kubenswrapper[4967]: I1121 17:01:06.148894 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgmdb\" (UniqueName: \"kubernetes.io/projected/97c2ceb6-473f-408c-8e7b-796f7f655f68-kube-api-access-zgmdb\") pod \"97c2ceb6-473f-408c-8e7b-796f7f655f68\" (UID: \"97c2ceb6-473f-408c-8e7b-796f7f655f68\") "
Nov 21 17:01:06 crc kubenswrapper[4967]: I1121 17:01:06.148990 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c2ceb6-473f-408c-8e7b-796f7f655f68-combined-ca-bundle\") pod \"97c2ceb6-473f-408c-8e7b-796f7f655f68\" (UID: \"97c2ceb6-473f-408c-8e7b-796f7f655f68\") "
Nov 21 17:01:06 crc kubenswrapper[4967]: I1121 17:01:06.149061 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/97c2ceb6-473f-408c-8e7b-796f7f655f68-fernet-keys\") pod \"97c2ceb6-473f-408c-8e7b-796f7f655f68\" (UID: \"97c2ceb6-473f-408c-8e7b-796f7f655f68\") "
Nov 21 17:01:06 crc kubenswrapper[4967]: I1121 17:01:06.149249 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97c2ceb6-473f-408c-8e7b-796f7f655f68-config-data\") pod \"97c2ceb6-473f-408c-8e7b-796f7f655f68\" (UID: \"97c2ceb6-473f-408c-8e7b-796f7f655f68\") "
Nov 21 17:01:06 crc kubenswrapper[4967]: I1121 17:01:06.164208 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97c2ceb6-473f-408c-8e7b-796f7f655f68-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "97c2ceb6-473f-408c-8e7b-796f7f655f68" (UID: "97c2ceb6-473f-408c-8e7b-796f7f655f68"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:01:06 crc kubenswrapper[4967]: I1121 17:01:06.164433 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97c2ceb6-473f-408c-8e7b-796f7f655f68-kube-api-access-zgmdb" (OuterVolumeSpecName: "kube-api-access-zgmdb") pod "97c2ceb6-473f-408c-8e7b-796f7f655f68" (UID: "97c2ceb6-473f-408c-8e7b-796f7f655f68"). InnerVolumeSpecName "kube-api-access-zgmdb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 17:01:06 crc kubenswrapper[4967]: I1121 17:01:06.189861 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97c2ceb6-473f-408c-8e7b-796f7f655f68-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "97c2ceb6-473f-408c-8e7b-796f7f655f68" (UID: "97c2ceb6-473f-408c-8e7b-796f7f655f68"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:01:06 crc kubenswrapper[4967]: I1121 17:01:06.218300 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97c2ceb6-473f-408c-8e7b-796f7f655f68-config-data" (OuterVolumeSpecName: "config-data") pod "97c2ceb6-473f-408c-8e7b-796f7f655f68" (UID: "97c2ceb6-473f-408c-8e7b-796f7f655f68"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:01:06 crc kubenswrapper[4967]: I1121 17:01:06.252643 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/97c2ceb6-473f-408c-8e7b-796f7f655f68-config-data\") on node \"crc\" DevicePath \"\""
Nov 21 17:01:06 crc kubenswrapper[4967]: I1121 17:01:06.252679 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgmdb\" (UniqueName: \"kubernetes.io/projected/97c2ceb6-473f-408c-8e7b-796f7f655f68-kube-api-access-zgmdb\") on node \"crc\" DevicePath \"\""
Nov 21 17:01:06 crc kubenswrapper[4967]: I1121 17:01:06.252691 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97c2ceb6-473f-408c-8e7b-796f7f655f68-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 21 17:01:06 crc kubenswrapper[4967]: I1121 17:01:06.252699 4967 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/97c2ceb6-473f-408c-8e7b-796f7f655f68-fernet-keys\") on node \"crc\" DevicePath \"\""
Nov 21 17:01:06 crc kubenswrapper[4967]: I1121 17:01:06.684164 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395741-7nk8d" event={"ID":"97c2ceb6-473f-408c-8e7b-796f7f655f68","Type":"ContainerDied","Data":"d699a4174b49074a400205b3c42e7c3e16acdb98dd5d16733cabfaa1209a217c"}
Nov 21 17:01:06 crc kubenswrapper[4967]: I1121 17:01:06.684513 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d699a4174b49074a400205b3c42e7c3e16acdb98dd5d16733cabfaa1209a217c"
Nov 21 17:01:06 crc kubenswrapper[4967]: I1121 17:01:06.684264 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29395741-7nk8d"
Nov 21 17:01:18 crc kubenswrapper[4967]: I1121 17:01:18.538602 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:01:18 crc kubenswrapper[4967]: E1121 17:01:18.539504 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:01:31 crc kubenswrapper[4967]: I1121 17:01:31.536838 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:01:31 crc kubenswrapper[4967]: E1121 17:01:31.539098 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:01:45 crc kubenswrapper[4967]: I1121 17:01:45.536668 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:01:45 crc kubenswrapper[4967]: E1121 17:01:45.537707 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:01:58 crc kubenswrapper[4967]: I1121 17:01:58.536029 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:01:58 crc kubenswrapper[4967]: E1121 17:01:58.536891 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:02:09 crc kubenswrapper[4967]: I1121 17:02:09.536616 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:02:09 crc kubenswrapper[4967]: E1121 17:02:09.537647 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:02:23 crc kubenswrapper[4967]: I1121 17:02:23.536491 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:02:23 crc kubenswrapper[4967]: E1121 17:02:23.537141 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:02:28 crc kubenswrapper[4967]: I1121 17:02:28.808926 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="b3d39ab9-f219-4af5-b82c-102fefaff9bc" containerName="galera" probeResult="failure" output="command timed out"
Nov 21 17:02:28 crc kubenswrapper[4967]: I1121 17:02:28.809916 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="b3d39ab9-f219-4af5-b82c-102fefaff9bc" containerName="galera" probeResult="failure" output="command timed out"
Nov 21 17:02:38 crc kubenswrapper[4967]: I1121 17:02:38.537706 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:02:38 crc kubenswrapper[4967]: E1121 17:02:38.538783 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:02:52 crc kubenswrapper[4967]: I1121 17:02:52.555385 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:02:52 crc kubenswrapper[4967]: E1121 17:02:52.556855 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:03:04 crc kubenswrapper[4967]: I1121 17:03:04.536977 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:03:04 crc kubenswrapper[4967]: E1121 17:03:04.538032 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:03:16 crc kubenswrapper[4967]: I1121 17:03:16.537882 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:03:16 crc kubenswrapper[4967]: E1121 17:03:16.539138 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:03:31 crc kubenswrapper[4967]: I1121 17:03:31.536015 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:03:31 crc kubenswrapper[4967]: E1121 17:03:31.538244 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:03:43 crc kubenswrapper[4967]: I1121 17:03:43.536139 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:03:43 crc kubenswrapper[4967]: E1121 17:03:43.537040 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:03:58 crc kubenswrapper[4967]: I1121 17:03:58.537648 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:03:58 crc kubenswrapper[4967]: E1121 17:03:58.538919 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:04:13 crc kubenswrapper[4967]: I1121 17:04:13.538002 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:04:13 crc kubenswrapper[4967]: E1121 17:04:13.539547 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:04:26 crc kubenswrapper[4967]: I1121 17:04:26.536033 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:04:26 crc kubenswrapper[4967]: E1121 17:04:26.537133 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:04:38 crc kubenswrapper[4967]: I1121 17:04:38.536842 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:04:38 crc kubenswrapper[4967]: E1121 17:04:38.537841 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:04:52 crc kubenswrapper[4967]: I1121 17:04:52.556554 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:04:52 crc kubenswrapper[4967]: E1121 17:04:52.557575 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:05:07 crc kubenswrapper[4967]: I1121 17:05:07.536881 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:05:07 crc kubenswrapper[4967]: E1121 17:05:07.538734 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:05:22 crc kubenswrapper[4967]: I1121 17:05:22.545285 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:05:23 crc kubenswrapper[4967]: I1121 17:05:23.600121 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"00750826e689fea3e4051e4f783e77d873d855f47ab7a93be0c135b70810dd5c"}
Nov 21 17:07:46 crc kubenswrapper[4967]: I1121 17:07:46.521853 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 21 17:07:46 crc kubenswrapper[4967]: I1121 17:07:46.522399 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 21 17:08:16 crc kubenswrapper[4967]: I1121 17:08:16.522302 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 21 17:08:16 crc kubenswrapper[4967]: I1121 17:08:16.523306 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 21 17:08:46 crc kubenswrapper[4967]: I1121 17:08:46.522597 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 21 17:08:46 crc kubenswrapper[4967]: I1121 17:08:46.523184 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 21 17:08:46 crc kubenswrapper[4967]: I1121 17:08:46.523247 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2"
Nov 21 17:08:46 crc kubenswrapper[4967]: I1121 17:08:46.524131 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"00750826e689fea3e4051e4f783e77d873d855f47ab7a93be0c135b70810dd5c"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 21 17:08:46 crc kubenswrapper[4967]: I1121 17:08:46.524182 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://00750826e689fea3e4051e4f783e77d873d855f47ab7a93be0c135b70810dd5c" gracePeriod=600
Nov 21 17:08:47 crc kubenswrapper[4967]: I1121 17:08:47.188427 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="00750826e689fea3e4051e4f783e77d873d855f47ab7a93be0c135b70810dd5c" exitCode=0
Nov 21 17:08:47 crc kubenswrapper[4967]: I1121 17:08:47.188522 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"00750826e689fea3e4051e4f783e77d873d855f47ab7a93be0c135b70810dd5c"}
Nov 21 17:08:47 crc kubenswrapper[4967]: I1121 17:08:47.189222 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d"}
Nov 21 17:08:47 crc kubenswrapper[4967]: I1121 17:08:47.189250 4967 scope.go:117] "RemoveContainer" containerID="e643fb2f75cc5d257cf35a48bff5ddf596027be4bdbc985487f0d1bd27d75287"
Nov 21 17:10:13 crc kubenswrapper[4967]: I1121 17:10:13.267236 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-z5mtx"]
Nov 21 17:10:13 crc kubenswrapper[4967]: E1121 17:10:13.270105 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97c2ceb6-473f-408c-8e7b-796f7f655f68" containerName="keystone-cron"
Nov 21 17:10:13 crc kubenswrapper[4967]: I1121 17:10:13.270142 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="97c2ceb6-473f-408c-8e7b-796f7f655f68" containerName="keystone-cron"
Nov 21 17:10:13 crc kubenswrapper[4967]: I1121 17:10:13.271167 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="97c2ceb6-473f-408c-8e7b-796f7f655f68" containerName="keystone-cron"
Nov 21 17:10:13 crc kubenswrapper[4967]: I1121 17:10:13.275050 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z5mtx"
Nov 21 17:10:13 crc kubenswrapper[4967]: I1121 17:10:13.304981 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z5mtx"]
Nov 21 17:10:13 crc kubenswrapper[4967]: I1121 17:10:13.432258 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35-utilities\") pod \"community-operators-z5mtx\" (UID: \"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35\") " pod="openshift-marketplace/community-operators-z5mtx"
Nov 21 17:10:13 crc kubenswrapper[4967]: I1121 17:10:13.432432 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbhn2\" (UniqueName: \"kubernetes.io/projected/c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35-kube-api-access-xbhn2\") pod \"community-operators-z5mtx\" (UID: \"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35\") " pod="openshift-marketplace/community-operators-z5mtx"
Nov 21 17:10:13 crc kubenswrapper[4967]: I1121 17:10:13.432481 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35-catalog-content\") pod \"community-operators-z5mtx\" (UID: \"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35\") " pod="openshift-marketplace/community-operators-z5mtx"
Nov 21 17:10:13 crc kubenswrapper[4967]: I1121 17:10:13.535118 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35-utilities\") pod \"community-operators-z5mtx\" (UID: \"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35\") " pod="openshift-marketplace/community-operators-z5mtx"
Nov 21 17:10:13 crc kubenswrapper[4967]: I1121 17:10:13.535276 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbhn2\" (UniqueName: \"kubernetes.io/projected/c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35-kube-api-access-xbhn2\") pod \"community-operators-z5mtx\" (UID: \"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35\") " pod="openshift-marketplace/community-operators-z5mtx"
Nov 21 17:10:13 crc kubenswrapper[4967]: I1121 17:10:13.535345 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35-catalog-content\") pod \"community-operators-z5mtx\" (UID: \"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35\") " pod="openshift-marketplace/community-operators-z5mtx"
Nov 21 17:10:13 crc kubenswrapper[4967]: I1121 17:10:13.535991 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35-catalog-content\") pod \"community-operators-z5mtx\" (UID: \"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35\") " pod="openshift-marketplace/community-operators-z5mtx"
Nov 21 17:10:13 crc kubenswrapper[4967]: I1121 17:10:13.536290 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35-utilities\") pod \"community-operators-z5mtx\" (UID: \"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35\") " pod="openshift-marketplace/community-operators-z5mtx"
Nov 21 17:10:13 crc kubenswrapper[4967]: I1121 17:10:13.561155 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbhn2\" (UniqueName: \"kubernetes.io/projected/c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35-kube-api-access-xbhn2\") pod \"community-operators-z5mtx\" (UID: \"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35\") " pod="openshift-marketplace/community-operators-z5mtx"
Nov 21 17:10:13 crc kubenswrapper[4967]: I1121 17:10:13.615862 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-z5mtx"
Nov 21 17:10:14 crc kubenswrapper[4967]: I1121 17:10:14.252051 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-z5mtx"]
Nov 21 17:10:14 crc kubenswrapper[4967]: I1121 17:10:14.321616 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z5mtx" event={"ID":"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35","Type":"ContainerStarted","Data":"7fb8df0961931145a2a2e7c7f09c97250675b6b30ed7924cadb4453c3f12b9b3"}
Nov 21 17:10:15 crc kubenswrapper[4967]: I1121 17:10:15.333788 4967 generic.go:334] "Generic (PLEG): container finished" podID="c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35" containerID="84a7483caae5f9315026649910cc76524a7854a42b61affabe87f7de535b9591" exitCode=0
Nov 21 17:10:15 crc kubenswrapper[4967]: I1121 17:10:15.333840 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z5mtx" event={"ID":"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35","Type":"ContainerDied","Data":"84a7483caae5f9315026649910cc76524a7854a42b61affabe87f7de535b9591"}
Nov 21 17:10:15 crc kubenswrapper[4967]: I1121 17:10:15.336122 4967 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 21 17:10:16 crc kubenswrapper[4967]: I1121 17:10:16.346171 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z5mtx" event={"ID":"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35","Type":"ContainerStarted","Data":"d252eb3a404114df36a07185c4165fcfc07a262812ba53a4ae5dbf16e3725378"}
Nov 21 17:10:18 crc kubenswrapper[4967]: I1121 17:10:18.377286 4967 generic.go:334] "Generic (PLEG): container finished" podID="c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35" containerID="d252eb3a404114df36a07185c4165fcfc07a262812ba53a4ae5dbf16e3725378" exitCode=0
Nov 21 17:10:18 crc kubenswrapper[4967]: I1121 17:10:18.377493 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z5mtx" event={"ID":"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35","Type":"ContainerDied","Data":"d252eb3a404114df36a07185c4165fcfc07a262812ba53a4ae5dbf16e3725378"}
Nov 21 17:10:19 crc kubenswrapper[4967]: I1121 17:10:19.407486 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z5mtx" event={"ID":"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35","Type":"ContainerStarted","Data":"77513fe15a73cc4490baadd1a6c02df48d31ab275a5b6ad312d5da632a28b38d"}
Nov 21 17:10:19 crc kubenswrapper[4967]: I1121 17:10:19.441326 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-z5mtx" podStartSLOduration=2.9171099910000002 podStartE2EDuration="6.441277915s" podCreationTimestamp="2025-11-21 17:10:13 +0000 UTC" firstStartedPulling="2025-11-21 17:10:15.335891905 +0000 UTC m=+5703.594412913" lastFinishedPulling="2025-11-21 17:10:18.860059829 +0000 UTC m=+5707.118580837" observedRunningTime="2025-11-21 17:10:19.43550461 +0000 UTC m=+5707.694025618" watchObservedRunningTime="2025-11-21 17:10:19.441277915 +0000 UTC m=+5707.699798913"
Nov 21 17:10:23 crc kubenswrapper[4967]: I1121 17:10:23.616247 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-z5mtx"
Nov 21 17:10:23 crc kubenswrapper[4967]: I1121 17:10:23.616813 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-z5mtx"
Nov 21 17:10:24 crc kubenswrapper[4967]: I1121 17:10:24.662714 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-z5mtx" podUID="c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35" containerName="registry-server" probeResult="failure" output=<
Nov 21 17:10:24 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s
Nov 21 17:10:24 crc kubenswrapper[4967]: >
Nov 21 17:10:27 crc kubenswrapper[4967]: I1121 17:10:27.204251 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ppbkt"]
Nov 21 17:10:27 crc kubenswrapper[4967]: I1121 17:10:27.206842 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ppbkt"
Nov 21 17:10:27 crc kubenswrapper[4967]: I1121 17:10:27.215526 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ppbkt"]
Nov 21 17:10:27 crc kubenswrapper[4967]: I1121 17:10:27.300811 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/930f5a62-8bc6-4884-95b1-1666b32aaa8d-utilities\") pod \"redhat-marketplace-ppbkt\" (UID: \"930f5a62-8bc6-4884-95b1-1666b32aaa8d\") " pod="openshift-marketplace/redhat-marketplace-ppbkt"
Nov 21 17:10:27 crc kubenswrapper[4967]: I1121 17:10:27.300947 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spnlp\" (UniqueName: \"kubernetes.io/projected/930f5a62-8bc6-4884-95b1-1666b32aaa8d-kube-api-access-spnlp\") pod \"redhat-marketplace-ppbkt\" (UID: \"930f5a62-8bc6-4884-95b1-1666b32aaa8d\") " pod="openshift-marketplace/redhat-marketplace-ppbkt"
Nov 21 17:10:27 crc kubenswrapper[4967]: I1121 17:10:27.301284 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/930f5a62-8bc6-4884-95b1-1666b32aaa8d-catalog-content\") pod \"redhat-marketplace-ppbkt\" (UID: \"930f5a62-8bc6-4884-95b1-1666b32aaa8d\") " pod="openshift-marketplace/redhat-marketplace-ppbkt"
Nov 21 17:10:27 crc kubenswrapper[4967]: I1121 17:10:27.404351 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/930f5a62-8bc6-4884-95b1-1666b32aaa8d-utilities\") pod \"redhat-marketplace-ppbkt\" (UID: \"930f5a62-8bc6-4884-95b1-1666b32aaa8d\") " pod="openshift-marketplace/redhat-marketplace-ppbkt"
Nov 21 17:10:27 crc kubenswrapper[4967]: I1121 17:10:27.404495 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spnlp\" (UniqueName: \"kubernetes.io/projected/930f5a62-8bc6-4884-95b1-1666b32aaa8d-kube-api-access-spnlp\") pod \"redhat-marketplace-ppbkt\" (UID: \"930f5a62-8bc6-4884-95b1-1666b32aaa8d\") " pod="openshift-marketplace/redhat-marketplace-ppbkt"
Nov 21 17:10:27 crc kubenswrapper[4967]: I1121 17:10:27.404628 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/930f5a62-8bc6-4884-95b1-1666b32aaa8d-catalog-content\") pod \"redhat-marketplace-ppbkt\" (UID: \"930f5a62-8bc6-4884-95b1-1666b32aaa8d\") " pod="openshift-marketplace/redhat-marketplace-ppbkt"
Nov 21 17:10:27 crc kubenswrapper[4967]: I1121 17:10:27.405081 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/930f5a62-8bc6-4884-95b1-1666b32aaa8d-catalog-content\") pod \"redhat-marketplace-ppbkt\" (UID: \"930f5a62-8bc6-4884-95b1-1666b32aaa8d\") " pod="openshift-marketplace/redhat-marketplace-ppbkt"
Nov 21 17:10:27 crc kubenswrapper[4967]: I1121 17:10:27.405274 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/930f5a62-8bc6-4884-95b1-1666b32aaa8d-utilities\") pod \"redhat-marketplace-ppbkt\" (UID: \"930f5a62-8bc6-4884-95b1-1666b32aaa8d\") " pod="openshift-marketplace/redhat-marketplace-ppbkt"
Nov 21 17:10:27 crc kubenswrapper[4967]: I1121 17:10:27.428757 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spnlp\" (UniqueName: \"kubernetes.io/projected/930f5a62-8bc6-4884-95b1-1666b32aaa8d-kube-api-access-spnlp\") pod \"redhat-marketplace-ppbkt\" (UID: \"930f5a62-8bc6-4884-95b1-1666b32aaa8d\") " pod="openshift-marketplace/redhat-marketplace-ppbkt"
Nov 21 17:10:27 crc kubenswrapper[4967]: I1121 17:10:27.528774 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ppbkt"
Nov 21 17:10:28 crc kubenswrapper[4967]: I1121 17:10:28.054756 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ppbkt"]
Nov 21 17:10:28 crc kubenswrapper[4967]: W1121 17:10:28.078509 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod930f5a62_8bc6_4884_95b1_1666b32aaa8d.slice/crio-4796f13e9a87b8dd2a632270976672e62c9e11d62e071188e6bd1d3fc3ab6290 WatchSource:0}: Error finding container 4796f13e9a87b8dd2a632270976672e62c9e11d62e071188e6bd1d3fc3ab6290: Status 404 returned error can't find the container with id 4796f13e9a87b8dd2a632270976672e62c9e11d62e071188e6bd1d3fc3ab6290
Nov 21 17:10:28 crc kubenswrapper[4967]: I1121 17:10:28.521982 4967 generic.go:334] "Generic (PLEG): container finished" podID="930f5a62-8bc6-4884-95b1-1666b32aaa8d" containerID="d6dcaf047e0c8af554001d4ef0545ba48cb7893992aef7eb408eea4bd5776d79" exitCode=0
Nov 21 17:10:28 crc kubenswrapper[4967]: I1121 17:10:28.522038 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ppbkt" event={"ID":"930f5a62-8bc6-4884-95b1-1666b32aaa8d","Type":"ContainerDied","Data":"d6dcaf047e0c8af554001d4ef0545ba48cb7893992aef7eb408eea4bd5776d79"}
Nov 21 17:10:28 crc kubenswrapper[4967]: I1121 17:10:28.522278 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ppbkt" event={"ID":"930f5a62-8bc6-4884-95b1-1666b32aaa8d","Type":"ContainerStarted","Data":"4796f13e9a87b8dd2a632270976672e62c9e11d62e071188e6bd1d3fc3ab6290"}
Nov 21 17:10:30 crc kubenswrapper[4967]: I1121 17:10:30.550375 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ppbkt" event={"ID":"930f5a62-8bc6-4884-95b1-1666b32aaa8d","Type":"ContainerStarted","Data":"8d8bb685dfcaeb1f3af0b6372f0a4057abadb0d874a9306700f3ccc08918cdaf"}
Nov 21 17:10:31 crc kubenswrapper[4967]: I1121 17:10:31.563642 4967 generic.go:334] "Generic (PLEG): container finished" podID="930f5a62-8bc6-4884-95b1-1666b32aaa8d" containerID="8d8bb685dfcaeb1f3af0b6372f0a4057abadb0d874a9306700f3ccc08918cdaf" exitCode=0
Nov 21 17:10:31 crc kubenswrapper[4967]: I1121 17:10:31.563856 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ppbkt" event={"ID":"930f5a62-8bc6-4884-95b1-1666b32aaa8d","Type":"ContainerDied","Data":"8d8bb685dfcaeb1f3af0b6372f0a4057abadb0d874a9306700f3ccc08918cdaf"}
Nov 21 17:10:32 crc kubenswrapper[4967]: I1121 17:10:32.578874 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ppbkt" event={"ID":"930f5a62-8bc6-4884-95b1-1666b32aaa8d","Type":"ContainerStarted","Data":"79ffa6d2981e0288f1d2bd716d0a8d40677d3f6c0fe8f172f3ea1071bf1c1d79"}
Nov 21 17:10:32 crc kubenswrapper[4967]: I1121 17:10:32.616249 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ppbkt" podStartSLOduration=2.023495776 podStartE2EDuration="5.616223283s" podCreationTimestamp="2025-11-21 17:10:27 +0000 UTC" firstStartedPulling="2025-11-21 17:10:28.525553444 +0000 UTC m=+5716.784074452" lastFinishedPulling="2025-11-21 17:10:32.118280951 +0000 UTC m=+5720.376801959" observedRunningTime="2025-11-21 17:10:32.598355661 +0000 UTC m=+5720.856876689" watchObservedRunningTime="2025-11-21 17:10:32.616223283 +0000 UTC m=+5720.874744281"
Nov 21 17:10:33 crc kubenswrapper[4967]: I1121 17:10:33.689742 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-z5mtx"
Nov 21 17:10:33 crc kubenswrapper[4967]: I1121 17:10:33.752513 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-z5mtx"
Nov 21 17:10:35 crc kubenswrapper[4967]: I1121 17:10:35.395489 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-r2fdr"]
Nov 21 17:10:35 crc kubenswrapper[4967]: I1121 17:10:35.398653 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r2fdr"
Nov 21 17:10:35 crc kubenswrapper[4967]: I1121 17:10:35.434024 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r2fdr"]
Nov 21 17:10:35 crc kubenswrapper[4967]: I1121 17:10:35.515522 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ff3938a-2275-44f4-9aa3-e8bba7d98e13-utilities\") pod \"certified-operators-r2fdr\" (UID: \"4ff3938a-2275-44f4-9aa3-e8bba7d98e13\") " pod="openshift-marketplace/certified-operators-r2fdr"
Nov 21 17:10:35 crc kubenswrapper[4967]: I1121 17:10:35.515681 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6ls8\" (UniqueName: \"kubernetes.io/projected/4ff3938a-2275-44f4-9aa3-e8bba7d98e13-kube-api-access-c6ls8\") pod \"certified-operators-r2fdr\" (UID: \"4ff3938a-2275-44f4-9aa3-e8bba7d98e13\") " pod="openshift-marketplace/certified-operators-r2fdr"
Nov 21 17:10:35 crc kubenswrapper[4967]: I1121 17:10:35.515709 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ff3938a-2275-44f4-9aa3-e8bba7d98e13-catalog-content\") pod \"certified-operators-r2fdr\" (UID: \"4ff3938a-2275-44f4-9aa3-e8bba7d98e13\") " pod="openshift-marketplace/certified-operators-r2fdr"
Nov 21 17:10:35 crc kubenswrapper[4967]: I1121 17:10:35.619689 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6ls8\" (UniqueName: \"kubernetes.io/projected/4ff3938a-2275-44f4-9aa3-e8bba7d98e13-kube-api-access-c6ls8\") pod \"certified-operators-r2fdr\" (UID: \"4ff3938a-2275-44f4-9aa3-e8bba7d98e13\") " pod="openshift-marketplace/certified-operators-r2fdr"
Nov 21 17:10:35 crc kubenswrapper[4967]: I1121 17:10:35.619744 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ff3938a-2275-44f4-9aa3-e8bba7d98e13-catalog-content\") pod \"certified-operators-r2fdr\" (UID: \"4ff3938a-2275-44f4-9aa3-e8bba7d98e13\") " pod="openshift-marketplace/certified-operators-r2fdr"
Nov 21 17:10:35 crc kubenswrapper[4967]: I1121 17:10:35.619951 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\"
(UniqueName: \"kubernetes.io/empty-dir/4ff3938a-2275-44f4-9aa3-e8bba7d98e13-utilities\") pod \"certified-operators-r2fdr\" (UID: \"4ff3938a-2275-44f4-9aa3-e8bba7d98e13\") " pod="openshift-marketplace/certified-operators-r2fdr" Nov 21 17:10:35 crc kubenswrapper[4967]: I1121 17:10:35.620472 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ff3938a-2275-44f4-9aa3-e8bba7d98e13-utilities\") pod \"certified-operators-r2fdr\" (UID: \"4ff3938a-2275-44f4-9aa3-e8bba7d98e13\") " pod="openshift-marketplace/certified-operators-r2fdr" Nov 21 17:10:35 crc kubenswrapper[4967]: I1121 17:10:35.621504 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ff3938a-2275-44f4-9aa3-e8bba7d98e13-catalog-content\") pod \"certified-operators-r2fdr\" (UID: \"4ff3938a-2275-44f4-9aa3-e8bba7d98e13\") " pod="openshift-marketplace/certified-operators-r2fdr" Nov 21 17:10:35 crc kubenswrapper[4967]: I1121 17:10:35.645189 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6ls8\" (UniqueName: \"kubernetes.io/projected/4ff3938a-2275-44f4-9aa3-e8bba7d98e13-kube-api-access-c6ls8\") pod \"certified-operators-r2fdr\" (UID: \"4ff3938a-2275-44f4-9aa3-e8bba7d98e13\") " pod="openshift-marketplace/certified-operators-r2fdr" Nov 21 17:10:35 crc kubenswrapper[4967]: I1121 17:10:35.756615 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r2fdr" Nov 21 17:10:35 crc kubenswrapper[4967]: I1121 17:10:35.996729 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z5mtx"] Nov 21 17:10:35 crc kubenswrapper[4967]: I1121 17:10:35.997072 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-z5mtx" podUID="c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35" containerName="registry-server" containerID="cri-o://77513fe15a73cc4490baadd1a6c02df48d31ab275a5b6ad312d5da632a28b38d" gracePeriod=2 Nov 21 17:10:36 crc kubenswrapper[4967]: W1121 17:10:36.379202 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4ff3938a_2275_44f4_9aa3_e8bba7d98e13.slice/crio-2e9946fb49fcccb7a22d04e3b9107324f580fa99cef950740d0a1f596bca7e77 WatchSource:0}: Error finding container 2e9946fb49fcccb7a22d04e3b9107324f580fa99cef950740d0a1f596bca7e77: Status 404 returned error can't find the container with id 2e9946fb49fcccb7a22d04e3b9107324f580fa99cef950740d0a1f596bca7e77 Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.380917 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r2fdr"] Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.482119 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-z5mtx" Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.550251 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35-utilities\") pod \"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35\" (UID: \"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35\") " Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.550355 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35-catalog-content\") pod \"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35\" (UID: \"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35\") " Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.550455 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xbhn2\" (UniqueName: \"kubernetes.io/projected/c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35-kube-api-access-xbhn2\") pod \"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35\" (UID: \"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35\") " Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.551929 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35-utilities" (OuterVolumeSpecName: "utilities") pod "c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35" (UID: "c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.561563 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35-kube-api-access-xbhn2" (OuterVolumeSpecName: "kube-api-access-xbhn2") pod "c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35" (UID: "c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35"). InnerVolumeSpecName "kube-api-access-xbhn2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.607969 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35" (UID: "c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.630663 4967 generic.go:334] "Generic (PLEG): container finished" podID="4ff3938a-2275-44f4-9aa3-e8bba7d98e13" containerID="50df06dee94760dff3484c1c9cbed6c452409d71e53e742cabb942cea91148f5" exitCode=0 Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.630720 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r2fdr" event={"ID":"4ff3938a-2275-44f4-9aa3-e8bba7d98e13","Type":"ContainerDied","Data":"50df06dee94760dff3484c1c9cbed6c452409d71e53e742cabb942cea91148f5"} Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.630783 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r2fdr" event={"ID":"4ff3938a-2275-44f4-9aa3-e8bba7d98e13","Type":"ContainerStarted","Data":"2e9946fb49fcccb7a22d04e3b9107324f580fa99cef950740d0a1f596bca7e77"} Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.635335 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-z5mtx" Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.635406 4967 generic.go:334] "Generic (PLEG): container finished" podID="c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35" containerID="77513fe15a73cc4490baadd1a6c02df48d31ab275a5b6ad312d5da632a28b38d" exitCode=0 Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.635524 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z5mtx" event={"ID":"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35","Type":"ContainerDied","Data":"77513fe15a73cc4490baadd1a6c02df48d31ab275a5b6ad312d5da632a28b38d"} Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.635622 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-z5mtx" event={"ID":"c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35","Type":"ContainerDied","Data":"7fb8df0961931145a2a2e7c7f09c97250675b6b30ed7924cadb4453c3f12b9b3"} Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.635659 4967 scope.go:117] "RemoveContainer" containerID="77513fe15a73cc4490baadd1a6c02df48d31ab275a5b6ad312d5da632a28b38d" Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.654495 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.654526 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.654537 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xbhn2\" (UniqueName: \"kubernetes.io/projected/c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35-kube-api-access-xbhn2\") on node \"crc\" DevicePath \"\"" Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.745741 4967 scope.go:117] "RemoveContainer" containerID="d252eb3a404114df36a07185c4165fcfc07a262812ba53a4ae5dbf16e3725378" Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.759870 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-z5mtx"] Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.771692 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-z5mtx"] Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.784472 4967 scope.go:117] "RemoveContainer" containerID="84a7483caae5f9315026649910cc76524a7854a42b61affabe87f7de535b9591" Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.836165 4967 scope.go:117] "RemoveContainer" containerID="77513fe15a73cc4490baadd1a6c02df48d31ab275a5b6ad312d5da632a28b38d" Nov 21 17:10:36 crc kubenswrapper[4967]: E1121 17:10:36.836715 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77513fe15a73cc4490baadd1a6c02df48d31ab275a5b6ad312d5da632a28b38d\": container with ID starting with 77513fe15a73cc4490baadd1a6c02df48d31ab275a5b6ad312d5da632a28b38d not found: ID does not exist" containerID="77513fe15a73cc4490baadd1a6c02df48d31ab275a5b6ad312d5da632a28b38d" Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.836767 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77513fe15a73cc4490baadd1a6c02df48d31ab275a5b6ad312d5da632a28b38d"} 
err="failed to get container status \"77513fe15a73cc4490baadd1a6c02df48d31ab275a5b6ad312d5da632a28b38d\": rpc error: code = NotFound desc = could not find container \"77513fe15a73cc4490baadd1a6c02df48d31ab275a5b6ad312d5da632a28b38d\": container with ID starting with 77513fe15a73cc4490baadd1a6c02df48d31ab275a5b6ad312d5da632a28b38d not found: ID does not exist" Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.836824 4967 scope.go:117] "RemoveContainer" containerID="d252eb3a404114df36a07185c4165fcfc07a262812ba53a4ae5dbf16e3725378" Nov 21 17:10:36 crc kubenswrapper[4967]: E1121 17:10:36.837082 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d252eb3a404114df36a07185c4165fcfc07a262812ba53a4ae5dbf16e3725378\": container with ID starting with d252eb3a404114df36a07185c4165fcfc07a262812ba53a4ae5dbf16e3725378 not found: ID does not exist" containerID="d252eb3a404114df36a07185c4165fcfc07a262812ba53a4ae5dbf16e3725378" Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.837105 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d252eb3a404114df36a07185c4165fcfc07a262812ba53a4ae5dbf16e3725378"} err="failed to get container status \"d252eb3a404114df36a07185c4165fcfc07a262812ba53a4ae5dbf16e3725378\": rpc error: code = NotFound desc = could not find container \"d252eb3a404114df36a07185c4165fcfc07a262812ba53a4ae5dbf16e3725378\": container with ID starting with d252eb3a404114df36a07185c4165fcfc07a262812ba53a4ae5dbf16e3725378 not found: ID does not exist" Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.837122 4967 scope.go:117] "RemoveContainer" containerID="84a7483caae5f9315026649910cc76524a7854a42b61affabe87f7de535b9591" Nov 21 17:10:36 crc kubenswrapper[4967]: E1121 17:10:36.837475 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84a7483caae5f9315026649910cc76524a7854a42b61affabe87f7de535b9591\": container with ID starting with 84a7483caae5f9315026649910cc76524a7854a42b61affabe87f7de535b9591 not found: ID does not exist" containerID="84a7483caae5f9315026649910cc76524a7854a42b61affabe87f7de535b9591" Nov 21 17:10:36 crc kubenswrapper[4967]: I1121 17:10:36.837501 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84a7483caae5f9315026649910cc76524a7854a42b61affabe87f7de535b9591"} err="failed to get container status \"84a7483caae5f9315026649910cc76524a7854a42b61affabe87f7de535b9591\": rpc error: code = NotFound desc = could not find container \"84a7483caae5f9315026649910cc76524a7854a42b61affabe87f7de535b9591\": container with ID starting with 84a7483caae5f9315026649910cc76524a7854a42b61affabe87f7de535b9591 not found: ID does not exist" Nov 21 17:10:37 crc kubenswrapper[4967]: I1121 17:10:37.529134 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ppbkt" Nov 21 17:10:37 crc kubenswrapper[4967]: I1121 17:10:37.530099 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ppbkt" Nov 21 17:10:37 crc kubenswrapper[4967]: I1121 17:10:37.589730 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ppbkt" Nov 21 17:10:37 crc kubenswrapper[4967]: I1121 17:10:37.651617 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-r2fdr" event={"ID":"4ff3938a-2275-44f4-9aa3-e8bba7d98e13","Type":"ContainerStarted","Data":"5ee5ea2b18b60c1d21eb548a39c3677940bdeb81af21aaba8d3f7bf4a3e758b9"} Nov 21 17:10:37 crc kubenswrapper[4967]: I1121 17:10:37.713808 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ppbkt" Nov 21 17:10:38 crc kubenswrapper[4967]: I1121 17:10:38.564634 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35" path="/var/lib/kubelet/pods/c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35/volumes" Nov 21 17:10:39 crc kubenswrapper[4967]: I1121 17:10:39.696372 4967 generic.go:334] "Generic (PLEG): container finished" podID="4ff3938a-2275-44f4-9aa3-e8bba7d98e13" containerID="5ee5ea2b18b60c1d21eb548a39c3677940bdeb81af21aaba8d3f7bf4a3e758b9" exitCode=0 Nov 21 17:10:39 crc kubenswrapper[4967]: I1121 17:10:39.696634 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r2fdr" event={"ID":"4ff3938a-2275-44f4-9aa3-e8bba7d98e13","Type":"ContainerDied","Data":"5ee5ea2b18b60c1d21eb548a39c3677940bdeb81af21aaba8d3f7bf4a3e758b9"} Nov 21 17:10:40 crc kubenswrapper[4967]: I1121 17:10:40.381340 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ppbkt"] Nov 21 17:10:40 crc kubenswrapper[4967]: I1121 17:10:40.381684 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ppbkt" podUID="930f5a62-8bc6-4884-95b1-1666b32aaa8d" containerName="registry-server" containerID="cri-o://79ffa6d2981e0288f1d2bd716d0a8d40677d3f6c0fe8f172f3ea1071bf1c1d79" gracePeriod=2 Nov 21 17:10:40 crc kubenswrapper[4967]: I1121 17:10:40.724001 4967 generic.go:334] "Generic (PLEG): container finished" podID="930f5a62-8bc6-4884-95b1-1666b32aaa8d" containerID="79ffa6d2981e0288f1d2bd716d0a8d40677d3f6c0fe8f172f3ea1071bf1c1d79" exitCode=0 Nov 21 17:10:40 crc kubenswrapper[4967]: I1121 17:10:40.724482 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ppbkt" event={"ID":"930f5a62-8bc6-4884-95b1-1666b32aaa8d","Type":"ContainerDied","Data":"79ffa6d2981e0288f1d2bd716d0a8d40677d3f6c0fe8f172f3ea1071bf1c1d79"} Nov 21 17:10:40 crc kubenswrapper[4967]: I1121 17:10:40.731434 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r2fdr" event={"ID":"4ff3938a-2275-44f4-9aa3-e8bba7d98e13","Type":"ContainerStarted","Data":"70eb9b75b45c919e31c5f68ef1ece2023e43d789ede99702f93fed6cb9d20337"} Nov 21 17:10:40 crc kubenswrapper[4967]: I1121 17:10:40.758937 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-r2fdr" podStartSLOduration=2.308835261 podStartE2EDuration="5.758914493s" podCreationTimestamp="2025-11-21 17:10:35 +0000 UTC" firstStartedPulling="2025-11-21 17:10:36.633734476 +0000 UTC m=+5724.892255494" lastFinishedPulling="2025-11-21 17:10:40.083813718 +0000 UTC m=+5728.342334726" observedRunningTime="2025-11-21 17:10:40.748721251 +0000 UTC m=+5729.007242259" watchObservedRunningTime="2025-11-21 17:10:40.758914493 +0000 UTC m=+5729.017435501" Nov 21 17:10:41 crc kubenswrapper[4967]: I1121 17:10:41.002423 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ppbkt" Nov 21 17:10:41 crc kubenswrapper[4967]: I1121 17:10:41.103216 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/930f5a62-8bc6-4884-95b1-1666b32aaa8d-utilities\") pod \"930f5a62-8bc6-4884-95b1-1666b32aaa8d\" (UID: \"930f5a62-8bc6-4884-95b1-1666b32aaa8d\") " Nov 21 17:10:41 crc kubenswrapper[4967]: I1121 17:10:41.103467 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/930f5a62-8bc6-4884-95b1-1666b32aaa8d-catalog-content\") pod \"930f5a62-8bc6-4884-95b1-1666b32aaa8d\" (UID: \"930f5a62-8bc6-4884-95b1-1666b32aaa8d\") " Nov 21 17:10:41 crc kubenswrapper[4967]: I1121 17:10:41.103765 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-spnlp\" (UniqueName: \"kubernetes.io/projected/930f5a62-8bc6-4884-95b1-1666b32aaa8d-kube-api-access-spnlp\") pod \"930f5a62-8bc6-4884-95b1-1666b32aaa8d\" (UID: \"930f5a62-8bc6-4884-95b1-1666b32aaa8d\") " Nov 21 17:10:41 crc kubenswrapper[4967]: I1121 17:10:41.104247 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/930f5a62-8bc6-4884-95b1-1666b32aaa8d-utilities" (OuterVolumeSpecName: "utilities") pod "930f5a62-8bc6-4884-95b1-1666b32aaa8d" (UID: "930f5a62-8bc6-4884-95b1-1666b32aaa8d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:10:41 crc kubenswrapper[4967]: I1121 17:10:41.105729 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/930f5a62-8bc6-4884-95b1-1666b32aaa8d-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 17:10:41 crc kubenswrapper[4967]: I1121 17:10:41.114198 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/930f5a62-8bc6-4884-95b1-1666b32aaa8d-kube-api-access-spnlp" (OuterVolumeSpecName: "kube-api-access-spnlp") pod "930f5a62-8bc6-4884-95b1-1666b32aaa8d" (UID: "930f5a62-8bc6-4884-95b1-1666b32aaa8d"). InnerVolumeSpecName "kube-api-access-spnlp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:10:41 crc kubenswrapper[4967]: I1121 17:10:41.121569 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/930f5a62-8bc6-4884-95b1-1666b32aaa8d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "930f5a62-8bc6-4884-95b1-1666b32aaa8d" (UID: "930f5a62-8bc6-4884-95b1-1666b32aaa8d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:10:41 crc kubenswrapper[4967]: I1121 17:10:41.208406 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/930f5a62-8bc6-4884-95b1-1666b32aaa8d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 17:10:41 crc kubenswrapper[4967]: I1121 17:10:41.209093 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-spnlp\" (UniqueName: \"kubernetes.io/projected/930f5a62-8bc6-4884-95b1-1666b32aaa8d-kube-api-access-spnlp\") on node \"crc\" DevicePath \"\"" Nov 21 17:10:41 crc kubenswrapper[4967]: I1121 17:10:41.752208 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ppbkt" event={"ID":"930f5a62-8bc6-4884-95b1-1666b32aaa8d","Type":"ContainerDied","Data":"4796f13e9a87b8dd2a632270976672e62c9e11d62e071188e6bd1d3fc3ab6290"} Nov 21 17:10:41 crc kubenswrapper[4967]: I1121 17:10:41.752280 4967 scope.go:117] "RemoveContainer" containerID="79ffa6d2981e0288f1d2bd716d0a8d40677d3f6c0fe8f172f3ea1071bf1c1d79" Nov 21 17:10:41 crc kubenswrapper[4967]: I1121 17:10:41.753042 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ppbkt" Nov 21 17:10:41 crc kubenswrapper[4967]: I1121 17:10:41.781993 4967 scope.go:117] "RemoveContainer" containerID="8d8bb685dfcaeb1f3af0b6372f0a4057abadb0d874a9306700f3ccc08918cdaf" Nov 21 17:10:41 crc kubenswrapper[4967]: I1121 17:10:41.803828 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ppbkt"] Nov 21 17:10:41 crc kubenswrapper[4967]: I1121 17:10:41.817639 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ppbkt"] Nov 21 17:10:41 crc kubenswrapper[4967]: I1121 17:10:41.827140 4967 scope.go:117] "RemoveContainer" containerID="d6dcaf047e0c8af554001d4ef0545ba48cb7893992aef7eb408eea4bd5776d79" Nov 21 17:10:42 crc kubenswrapper[4967]: I1121 17:10:42.553180 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="930f5a62-8bc6-4884-95b1-1666b32aaa8d" path="/var/lib/kubelet/pods/930f5a62-8bc6-4884-95b1-1666b32aaa8d/volumes" Nov 21 17:10:45 crc kubenswrapper[4967]: I1121 17:10:45.757272 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-r2fdr" Nov 21 17:10:45 crc kubenswrapper[4967]: I1121 17:10:45.757876 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-r2fdr" Nov 21 17:10:46 crc kubenswrapper[4967]: I1121 17:10:46.522358 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:10:46 crc kubenswrapper[4967]: I1121 17:10:46.522425 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:10:47 crc kubenswrapper[4967]: I1121 17:10:47.559364 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-r2fdr" 
podUID="4ff3938a-2275-44f4-9aa3-e8bba7d98e13" containerName="registry-server" probeResult="failure" output=< Nov 21 17:10:47 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 17:10:47 crc kubenswrapper[4967]: > Nov 21 17:10:55 crc kubenswrapper[4967]: I1121 17:10:55.813776 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-r2fdr" Nov 21 17:10:55 crc kubenswrapper[4967]: I1121 17:10:55.867268 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-r2fdr" Nov 21 17:10:56 crc kubenswrapper[4967]: I1121 17:10:56.051567 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r2fdr"] Nov 21 17:10:56 crc kubenswrapper[4967]: I1121 17:10:56.946548 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-r2fdr" podUID="4ff3938a-2275-44f4-9aa3-e8bba7d98e13" containerName="registry-server" containerID="cri-o://70eb9b75b45c919e31c5f68ef1ece2023e43d789ede99702f93fed6cb9d20337" gracePeriod=2 Nov 21 17:10:57 crc kubenswrapper[4967]: I1121 17:10:57.972393 4967 generic.go:334] "Generic (PLEG): container finished" podID="4ff3938a-2275-44f4-9aa3-e8bba7d98e13" containerID="70eb9b75b45c919e31c5f68ef1ece2023e43d789ede99702f93fed6cb9d20337" exitCode=0 Nov 21 17:10:57 crc kubenswrapper[4967]: I1121 17:10:57.972472 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r2fdr" event={"ID":"4ff3938a-2275-44f4-9aa3-e8bba7d98e13","Type":"ContainerDied","Data":"70eb9b75b45c919e31c5f68ef1ece2023e43d789ede99702f93fed6cb9d20337"} Nov 21 17:10:58 crc kubenswrapper[4967]: I1121 17:10:58.259847 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r2fdr" Nov 21 17:10:58 crc kubenswrapper[4967]: I1121 17:10:58.397754 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ff3938a-2275-44f4-9aa3-e8bba7d98e13-catalog-content\") pod \"4ff3938a-2275-44f4-9aa3-e8bba7d98e13\" (UID: \"4ff3938a-2275-44f4-9aa3-e8bba7d98e13\") " Nov 21 17:10:58 crc kubenswrapper[4967]: I1121 17:10:58.398246 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ff3938a-2275-44f4-9aa3-e8bba7d98e13-utilities\") pod \"4ff3938a-2275-44f4-9aa3-e8bba7d98e13\" (UID: \"4ff3938a-2275-44f4-9aa3-e8bba7d98e13\") " Nov 21 17:10:58 crc kubenswrapper[4967]: I1121 17:10:58.398948 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ff3938a-2275-44f4-9aa3-e8bba7d98e13-utilities" (OuterVolumeSpecName: "utilities") pod "4ff3938a-2275-44f4-9aa3-e8bba7d98e13" (UID: "4ff3938a-2275-44f4-9aa3-e8bba7d98e13"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:10:58 crc kubenswrapper[4967]: I1121 17:10:58.399366 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6ls8\" (UniqueName: \"kubernetes.io/projected/4ff3938a-2275-44f4-9aa3-e8bba7d98e13-kube-api-access-c6ls8\") pod \"4ff3938a-2275-44f4-9aa3-e8bba7d98e13\" (UID: \"4ff3938a-2275-44f4-9aa3-e8bba7d98e13\") " Nov 21 17:10:58 crc kubenswrapper[4967]: I1121 17:10:58.400229 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ff3938a-2275-44f4-9aa3-e8bba7d98e13-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 17:10:58 crc kubenswrapper[4967]: I1121 17:10:58.405651 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ff3938a-2275-44f4-9aa3-e8bba7d98e13-kube-api-access-c6ls8" (OuterVolumeSpecName: "kube-api-access-c6ls8") pod "4ff3938a-2275-44f4-9aa3-e8bba7d98e13" (UID: "4ff3938a-2275-44f4-9aa3-e8bba7d98e13"). InnerVolumeSpecName "kube-api-access-c6ls8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:10:58 crc kubenswrapper[4967]: I1121 17:10:58.453823 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ff3938a-2275-44f4-9aa3-e8bba7d98e13-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4ff3938a-2275-44f4-9aa3-e8bba7d98e13" (UID: "4ff3938a-2275-44f4-9aa3-e8bba7d98e13"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:10:58 crc kubenswrapper[4967]: I1121 17:10:58.502974 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ff3938a-2275-44f4-9aa3-e8bba7d98e13-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 17:10:58 crc kubenswrapper[4967]: I1121 17:10:58.503010 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6ls8\" (UniqueName: \"kubernetes.io/projected/4ff3938a-2275-44f4-9aa3-e8bba7d98e13-kube-api-access-c6ls8\") on node \"crc\" DevicePath \"\"" Nov 21 17:10:58 crc kubenswrapper[4967]: I1121 17:10:58.991411 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r2fdr" event={"ID":"4ff3938a-2275-44f4-9aa3-e8bba7d98e13","Type":"ContainerDied","Data":"2e9946fb49fcccb7a22d04e3b9107324f580fa99cef950740d0a1f596bca7e77"} Nov 21 17:10:58 crc kubenswrapper[4967]: I1121 17:10:58.991473 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-r2fdr" Nov 21 17:10:58 crc kubenswrapper[4967]: I1121 17:10:58.991487 4967 scope.go:117] "RemoveContainer" containerID="70eb9b75b45c919e31c5f68ef1ece2023e43d789ede99702f93fed6cb9d20337" Nov 21 17:10:59 crc kubenswrapper[4967]: I1121 17:10:59.022466 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r2fdr"] Nov 21 17:10:59 crc kubenswrapper[4967]: I1121 17:10:59.032674 4967 scope.go:117] "RemoveContainer" containerID="5ee5ea2b18b60c1d21eb548a39c3677940bdeb81af21aaba8d3f7bf4a3e758b9" Nov 21 17:10:59 crc kubenswrapper[4967]: I1121 17:10:59.032684 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-r2fdr"] Nov 21 17:10:59 crc kubenswrapper[4967]: I1121 17:10:59.175479 4967 scope.go:117] "RemoveContainer" containerID="50df06dee94760dff3484c1c9cbed6c452409d71e53e742cabb942cea91148f5" Nov 21 17:11:00 crc kubenswrapper[4967]: I1121 17:11:00.553993 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ff3938a-2275-44f4-9aa3-e8bba7d98e13" path="/var/lib/kubelet/pods/4ff3938a-2275-44f4-9aa3-e8bba7d98e13/volumes" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.522228 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.522764 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.648945 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-t7vdt"] Nov 21 17:11:16 crc kubenswrapper[4967]: E1121 17:11:16.649807 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ff3938a-2275-44f4-9aa3-e8bba7d98e13" containerName="extract-utilities" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.649830 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ff3938a-2275-44f4-9aa3-e8bba7d98e13" containerName="extract-utilities" Nov 21 17:11:16 crc kubenswrapper[4967]: E1121 17:11:16.649855 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ff3938a-2275-44f4-9aa3-e8bba7d98e13" containerName="registry-server" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.649864 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ff3938a-2275-44f4-9aa3-e8bba7d98e13" containerName="registry-server" Nov 21 17:11:16 crc kubenswrapper[4967]: E1121 17:11:16.649880 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35" containerName="registry-server" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.649890 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35" containerName="registry-server" Nov 21 17:11:16 crc kubenswrapper[4967]: E1121 17:11:16.649920 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="930f5a62-8bc6-4884-95b1-1666b32aaa8d" containerName="extract-content" Nov 21 17:11:16 crc 
kubenswrapper[4967]: I1121 17:11:16.649928 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="930f5a62-8bc6-4884-95b1-1666b32aaa8d" containerName="extract-content" Nov 21 17:11:16 crc kubenswrapper[4967]: E1121 17:11:16.649944 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35" containerName="extract-content" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.649951 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35" containerName="extract-content" Nov 21 17:11:16 crc kubenswrapper[4967]: E1121 17:11:16.649970 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="930f5a62-8bc6-4884-95b1-1666b32aaa8d" containerName="extract-utilities" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.649978 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="930f5a62-8bc6-4884-95b1-1666b32aaa8d" containerName="extract-utilities" Nov 21 17:11:16 crc kubenswrapper[4967]: E1121 17:11:16.649991 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35" containerName="extract-utilities" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.649999 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35" containerName="extract-utilities" Nov 21 17:11:16 crc kubenswrapper[4967]: E1121 17:11:16.650016 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ff3938a-2275-44f4-9aa3-e8bba7d98e13" containerName="extract-content" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.650027 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ff3938a-2275-44f4-9aa3-e8bba7d98e13" containerName="extract-content" Nov 21 17:11:16 crc kubenswrapper[4967]: E1121 17:11:16.650047 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="930f5a62-8bc6-4884-95b1-1666b32aaa8d" containerName="registry-server" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.650057 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="930f5a62-8bc6-4884-95b1-1666b32aaa8d" containerName="registry-server" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.650350 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5b8a2f8-ceed-4bff-8a88-42fc31ce5e35" containerName="registry-server" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.650385 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ff3938a-2275-44f4-9aa3-e8bba7d98e13" containerName="registry-server" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.650399 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="930f5a62-8bc6-4884-95b1-1666b32aaa8d" containerName="registry-server" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.652548 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-t7vdt" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.663113 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t7vdt"] Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.768330 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmc6f\" (UniqueName: \"kubernetes.io/projected/3241343a-d423-4cbc-927f-52a90a7d5d16-kube-api-access-gmc6f\") pod \"redhat-operators-t7vdt\" (UID: \"3241343a-d423-4cbc-927f-52a90a7d5d16\") " pod="openshift-marketplace/redhat-operators-t7vdt" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.768431 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3241343a-d423-4cbc-927f-52a90a7d5d16-utilities\") pod \"redhat-operators-t7vdt\" (UID: \"3241343a-d423-4cbc-927f-52a90a7d5d16\") " pod="openshift-marketplace/redhat-operators-t7vdt" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.768604 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3241343a-d423-4cbc-927f-52a90a7d5d16-catalog-content\") pod \"redhat-operators-t7vdt\" (UID: \"3241343a-d423-4cbc-927f-52a90a7d5d16\") " pod="openshift-marketplace/redhat-operators-t7vdt" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.870852 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3241343a-d423-4cbc-927f-52a90a7d5d16-utilities\") pod \"redhat-operators-t7vdt\" (UID: \"3241343a-d423-4cbc-927f-52a90a7d5d16\") " pod="openshift-marketplace/redhat-operators-t7vdt" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.871048 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3241343a-d423-4cbc-927f-52a90a7d5d16-catalog-content\") pod \"redhat-operators-t7vdt\" (UID: \"3241343a-d423-4cbc-927f-52a90a7d5d16\") " pod="openshift-marketplace/redhat-operators-t7vdt" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.871258 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmc6f\" (UniqueName: \"kubernetes.io/projected/3241343a-d423-4cbc-927f-52a90a7d5d16-kube-api-access-gmc6f\") pod \"redhat-operators-t7vdt\" (UID: \"3241343a-d423-4cbc-927f-52a90a7d5d16\") " pod="openshift-marketplace/redhat-operators-t7vdt" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.871590 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3241343a-d423-4cbc-927f-52a90a7d5d16-utilities\") pod \"redhat-operators-t7vdt\" (UID: \"3241343a-d423-4cbc-927f-52a90a7d5d16\") " pod="openshift-marketplace/redhat-operators-t7vdt" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.871655 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3241343a-d423-4cbc-927f-52a90a7d5d16-catalog-content\") pod \"redhat-operators-t7vdt\" (UID: \"3241343a-d423-4cbc-927f-52a90a7d5d16\") " pod="openshift-marketplace/redhat-operators-t7vdt" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.893568 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-gmc6f\" (UniqueName: \"kubernetes.io/projected/3241343a-d423-4cbc-927f-52a90a7d5d16-kube-api-access-gmc6f\") pod \"redhat-operators-t7vdt\" (UID: \"3241343a-d423-4cbc-927f-52a90a7d5d16\") " pod="openshift-marketplace/redhat-operators-t7vdt" Nov 21 17:11:16 crc kubenswrapper[4967]: I1121 17:11:16.982255 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t7vdt" Nov 21 17:11:17 crc kubenswrapper[4967]: I1121 17:11:17.488183 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t7vdt"] Nov 21 17:11:18 crc kubenswrapper[4967]: I1121 17:11:18.234172 4967 generic.go:334] "Generic (PLEG): container finished" podID="3241343a-d423-4cbc-927f-52a90a7d5d16" containerID="b42e335d14a625ad3657f492cde02cbc4f3892361ecb7dbde992816851798340" exitCode=0 Nov 21 17:11:18 crc kubenswrapper[4967]: I1121 17:11:18.234278 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7vdt" event={"ID":"3241343a-d423-4cbc-927f-52a90a7d5d16","Type":"ContainerDied","Data":"b42e335d14a625ad3657f492cde02cbc4f3892361ecb7dbde992816851798340"} Nov 21 17:11:18 crc kubenswrapper[4967]: I1121 17:11:18.237195 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7vdt" event={"ID":"3241343a-d423-4cbc-927f-52a90a7d5d16","Type":"ContainerStarted","Data":"2f94d9007ab3ee0879974092806d390b778e804d150dea61260b07cde50d9bb1"} Nov 21 17:11:20 crc kubenswrapper[4967]: I1121 17:11:20.259694 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7vdt" event={"ID":"3241343a-d423-4cbc-927f-52a90a7d5d16","Type":"ContainerStarted","Data":"cb10a0caf5ed4f141fe48088d338c028da47834ad6ef74185b937f0e3df12596"} Nov 21 17:11:24 crc kubenswrapper[4967]: I1121 17:11:24.320349 4967 generic.go:334] "Generic (PLEG): container finished" podID="3241343a-d423-4cbc-927f-52a90a7d5d16" containerID="cb10a0caf5ed4f141fe48088d338c028da47834ad6ef74185b937f0e3df12596" exitCode=0 Nov 21 17:11:24 crc kubenswrapper[4967]: I1121 17:11:24.320797 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7vdt" event={"ID":"3241343a-d423-4cbc-927f-52a90a7d5d16","Type":"ContainerDied","Data":"cb10a0caf5ed4f141fe48088d338c028da47834ad6ef74185b937f0e3df12596"} Nov 21 17:11:25 crc kubenswrapper[4967]: I1121 17:11:25.346066 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7vdt" event={"ID":"3241343a-d423-4cbc-927f-52a90a7d5d16","Type":"ContainerStarted","Data":"5eebd485e3fbc5159ca02d10550d5157df3e06e8594c9811be3a469f5e995501"} Nov 21 17:11:25 crc kubenswrapper[4967]: I1121 17:11:25.371301 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-t7vdt" podStartSLOduration=2.761692384 podStartE2EDuration="9.371276215s" podCreationTimestamp="2025-11-21 17:11:16 +0000 UTC" firstStartedPulling="2025-11-21 17:11:18.236754019 +0000 UTC m=+5766.495275027" lastFinishedPulling="2025-11-21 17:11:24.84633785 +0000 UTC m=+5773.104858858" observedRunningTime="2025-11-21 17:11:25.362745881 +0000 UTC m=+5773.621266899" watchObservedRunningTime="2025-11-21 17:11:25.371276215 +0000 UTC m=+5773.629797233" Nov 21 17:11:26 crc kubenswrapper[4967]: I1121 17:11:26.982941 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-t7vdt" Nov 
21 17:11:26 crc kubenswrapper[4967]: I1121 17:11:26.983340 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-t7vdt" Nov 21 17:11:28 crc kubenswrapper[4967]: I1121 17:11:28.352355 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-t7vdt" podUID="3241343a-d423-4cbc-927f-52a90a7d5d16" containerName="registry-server" probeResult="failure" output=< Nov 21 17:11:28 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 17:11:28 crc kubenswrapper[4967]: > Nov 21 17:11:38 crc kubenswrapper[4967]: I1121 17:11:38.046378 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-t7vdt" podUID="3241343a-d423-4cbc-927f-52a90a7d5d16" containerName="registry-server" probeResult="failure" output=< Nov 21 17:11:38 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 17:11:38 crc kubenswrapper[4967]: > Nov 21 17:11:46 crc kubenswrapper[4967]: I1121 17:11:46.522276 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:11:46 crc kubenswrapper[4967]: I1121 17:11:46.522972 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:11:46 crc kubenswrapper[4967]: I1121 17:11:46.523043 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 17:11:46 crc kubenswrapper[4967]: I1121 17:11:46.524470 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 17:11:46 crc kubenswrapper[4967]: I1121 17:11:46.524561 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d" gracePeriod=600 Nov 21 17:11:46 crc kubenswrapper[4967]: E1121 17:11:46.645211 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:11:47 crc kubenswrapper[4967]: I1121 17:11:47.034091 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-t7vdt" Nov 21 17:11:47 crc kubenswrapper[4967]: I1121 17:11:47.089417 4967 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-t7vdt" Nov 21 17:11:47 crc kubenswrapper[4967]: I1121 17:11:47.662625 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d" exitCode=0 Nov 21 17:11:47 crc kubenswrapper[4967]: I1121 17:11:47.662715 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d"} Nov 21 17:11:47 crc kubenswrapper[4967]: I1121 17:11:47.663816 4967 scope.go:117] "RemoveContainer" containerID="00750826e689fea3e4051e4f783e77d873d855f47ab7a93be0c135b70810dd5c" Nov 21 17:11:47 crc kubenswrapper[4967]: I1121 17:11:47.664896 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d" Nov 21 17:11:47 crc kubenswrapper[4967]: E1121 17:11:47.665267 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:11:47 crc kubenswrapper[4967]: I1121 17:11:47.851614 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t7vdt"] Nov 21 17:11:48 crc kubenswrapper[4967]: I1121 17:11:48.676996 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-t7vdt" podUID="3241343a-d423-4cbc-927f-52a90a7d5d16" containerName="registry-server" containerID="cri-o://5eebd485e3fbc5159ca02d10550d5157df3e06e8594c9811be3a469f5e995501" gracePeriod=2 Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.203784 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-t7vdt" Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.315687 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gmc6f\" (UniqueName: \"kubernetes.io/projected/3241343a-d423-4cbc-927f-52a90a7d5d16-kube-api-access-gmc6f\") pod \"3241343a-d423-4cbc-927f-52a90a7d5d16\" (UID: \"3241343a-d423-4cbc-927f-52a90a7d5d16\") " Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.316269 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3241343a-d423-4cbc-927f-52a90a7d5d16-catalog-content\") pod \"3241343a-d423-4cbc-927f-52a90a7d5d16\" (UID: \"3241343a-d423-4cbc-927f-52a90a7d5d16\") " Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.316294 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3241343a-d423-4cbc-927f-52a90a7d5d16-utilities\") pod \"3241343a-d423-4cbc-927f-52a90a7d5d16\" (UID: \"3241343a-d423-4cbc-927f-52a90a7d5d16\") " Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.317174 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3241343a-d423-4cbc-927f-52a90a7d5d16-utilities" (OuterVolumeSpecName: "utilities") pod "3241343a-d423-4cbc-927f-52a90a7d5d16" (UID: "3241343a-d423-4cbc-927f-52a90a7d5d16"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.323949 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3241343a-d423-4cbc-927f-52a90a7d5d16-kube-api-access-gmc6f" (OuterVolumeSpecName: "kube-api-access-gmc6f") pod "3241343a-d423-4cbc-927f-52a90a7d5d16" (UID: "3241343a-d423-4cbc-927f-52a90a7d5d16"). InnerVolumeSpecName "kube-api-access-gmc6f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.419331 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3241343a-d423-4cbc-927f-52a90a7d5d16-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.419373 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmc6f\" (UniqueName: \"kubernetes.io/projected/3241343a-d423-4cbc-927f-52a90a7d5d16-kube-api-access-gmc6f\") on node \"crc\" DevicePath \"\"" Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.421150 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3241343a-d423-4cbc-927f-52a90a7d5d16-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3241343a-d423-4cbc-927f-52a90a7d5d16" (UID: "3241343a-d423-4cbc-927f-52a90a7d5d16"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.522506 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3241343a-d423-4cbc-927f-52a90a7d5d16-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.690411 4967 generic.go:334] "Generic (PLEG): container finished" podID="3241343a-d423-4cbc-927f-52a90a7d5d16" containerID="5eebd485e3fbc5159ca02d10550d5157df3e06e8594c9811be3a469f5e995501" exitCode=0 Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.690463 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7vdt" event={"ID":"3241343a-d423-4cbc-927f-52a90a7d5d16","Type":"ContainerDied","Data":"5eebd485e3fbc5159ca02d10550d5157df3e06e8594c9811be3a469f5e995501"} Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.690495 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t7vdt" event={"ID":"3241343a-d423-4cbc-927f-52a90a7d5d16","Type":"ContainerDied","Data":"2f94d9007ab3ee0879974092806d390b778e804d150dea61260b07cde50d9bb1"} Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.690515 4967 scope.go:117] "RemoveContainer" containerID="5eebd485e3fbc5159ca02d10550d5157df3e06e8594c9811be3a469f5e995501" Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.690513 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t7vdt" Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.724376 4967 scope.go:117] "RemoveContainer" containerID="cb10a0caf5ed4f141fe48088d338c028da47834ad6ef74185b937f0e3df12596" Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.727352 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t7vdt"] Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.737765 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-t7vdt"] Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.756245 4967 scope.go:117] "RemoveContainer" containerID="b42e335d14a625ad3657f492cde02cbc4f3892361ecb7dbde992816851798340" Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.800189 4967 scope.go:117] "RemoveContainer" containerID="5eebd485e3fbc5159ca02d10550d5157df3e06e8594c9811be3a469f5e995501" Nov 21 17:11:49 crc kubenswrapper[4967]: E1121 17:11:49.800658 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5eebd485e3fbc5159ca02d10550d5157df3e06e8594c9811be3a469f5e995501\": container with ID starting with 5eebd485e3fbc5159ca02d10550d5157df3e06e8594c9811be3a469f5e995501 not found: ID does not exist" containerID="5eebd485e3fbc5159ca02d10550d5157df3e06e8594c9811be3a469f5e995501" Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.800700 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5eebd485e3fbc5159ca02d10550d5157df3e06e8594c9811be3a469f5e995501"} err="failed to get container status \"5eebd485e3fbc5159ca02d10550d5157df3e06e8594c9811be3a469f5e995501\": rpc error: code = NotFound desc = could not find container \"5eebd485e3fbc5159ca02d10550d5157df3e06e8594c9811be3a469f5e995501\": container with ID starting with 5eebd485e3fbc5159ca02d10550d5157df3e06e8594c9811be3a469f5e995501 not found: ID does not exist" Nov 21 17:11:49 crc 
kubenswrapper[4967]: I1121 17:11:49.800727 4967 scope.go:117] "RemoveContainer" containerID="cb10a0caf5ed4f141fe48088d338c028da47834ad6ef74185b937f0e3df12596" Nov 21 17:11:49 crc kubenswrapper[4967]: E1121 17:11:49.800958 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb10a0caf5ed4f141fe48088d338c028da47834ad6ef74185b937f0e3df12596\": container with ID starting with cb10a0caf5ed4f141fe48088d338c028da47834ad6ef74185b937f0e3df12596 not found: ID does not exist" containerID="cb10a0caf5ed4f141fe48088d338c028da47834ad6ef74185b937f0e3df12596" Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.800986 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb10a0caf5ed4f141fe48088d338c028da47834ad6ef74185b937f0e3df12596"} err="failed to get container status \"cb10a0caf5ed4f141fe48088d338c028da47834ad6ef74185b937f0e3df12596\": rpc error: code = NotFound desc = could not find container \"cb10a0caf5ed4f141fe48088d338c028da47834ad6ef74185b937f0e3df12596\": container with ID starting with cb10a0caf5ed4f141fe48088d338c028da47834ad6ef74185b937f0e3df12596 not found: ID does not exist" Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.801001 4967 scope.go:117] "RemoveContainer" containerID="b42e335d14a625ad3657f492cde02cbc4f3892361ecb7dbde992816851798340" Nov 21 17:11:49 crc kubenswrapper[4967]: E1121 17:11:49.801403 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b42e335d14a625ad3657f492cde02cbc4f3892361ecb7dbde992816851798340\": container with ID starting with b42e335d14a625ad3657f492cde02cbc4f3892361ecb7dbde992816851798340 not found: ID does not exist" containerID="b42e335d14a625ad3657f492cde02cbc4f3892361ecb7dbde992816851798340" Nov 21 17:11:49 crc kubenswrapper[4967]: I1121 17:11:49.801432 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b42e335d14a625ad3657f492cde02cbc4f3892361ecb7dbde992816851798340"} err="failed to get container status \"b42e335d14a625ad3657f492cde02cbc4f3892361ecb7dbde992816851798340\": rpc error: code = NotFound desc = could not find container \"b42e335d14a625ad3657f492cde02cbc4f3892361ecb7dbde992816851798340\": container with ID starting with b42e335d14a625ad3657f492cde02cbc4f3892361ecb7dbde992816851798340 not found: ID does not exist" Nov 21 17:11:50 crc kubenswrapper[4967]: I1121 17:11:50.552022 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3241343a-d423-4cbc-927f-52a90a7d5d16" path="/var/lib/kubelet/pods/3241343a-d423-4cbc-927f-52a90a7d5d16/volumes" Nov 21 17:11:58 crc kubenswrapper[4967]: I1121 17:11:58.536957 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d" Nov 21 17:11:58 crc kubenswrapper[4967]: E1121 17:11:58.538195 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:12:10 crc kubenswrapper[4967]: I1121 17:12:10.537084 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d" 
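
The paired "RemoveContainer" / NotFound entries above show the kubelet asking CRI-O for the status of containers that the runtime has already deleted; the "ContainerStatus from runtime service failed" errors are expected noise from idempotent cleanup, not failures. A minimal Python sketch that pairs the two kinds of entries (the kubelet.log path is a placeholder, and the patterns assume the klog-style line format shown above):

    import re
    from collections import defaultdict

    LOG_PATH = "kubelet.log"  # placeholder path, not taken from the log above

    # A RemoveContainer request, and a NotFound error for the same container ID.
    remove_re = re.compile(r'"RemoveContainer" containerID="([0-9a-f]{64})"')
    notfound_re = re.compile(r'could not find container \\"([0-9a-f]{64})\\"')

    counts = defaultdict(lambda: {"remove": 0, "notfound": 0})
    with open(LOG_PATH) as f:
        for line in f:
            for key, rx in (("remove", remove_re), ("notfound", notfound_re)):
                m = rx.search(line)
                if m:
                    counts[m.group(1)][key] += 1

    for cid, c in counts.items():
        if c["notfound"]:
            # NotFound after a RemoveContainer request means the runtime had
            # already deleted the container; the kubelet's retry is benign.
            print(f"{cid[:12]} removed {c['remove']}x, NotFound {c['notfound']}x")
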
Nov 21 17:12:10 crc kubenswrapper[4967]: E1121 17:12:10.537945 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:12:21 crc kubenswrapper[4967]: I1121 17:12:21.537117 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d"
Nov 21 17:12:21 crc kubenswrapper[4967]: E1121 17:12:21.538213 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:12:36 crc kubenswrapper[4967]: I1121 17:12:36.536104 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d"
Nov 21 17:12:36 crc kubenswrapper[4967]: E1121 17:12:36.537038 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:12:51 crc kubenswrapper[4967]: I1121 17:12:51.536925 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d"
Nov 21 17:12:51 crc kubenswrapper[4967]: E1121 17:12:51.537866 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:13:04 crc kubenswrapper[4967]: I1121 17:13:04.536705 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d"
Nov 21 17:13:04 crc kubenswrapper[4967]: E1121 17:13:04.537568 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:13:19 crc kubenswrapper[4967]: I1121 17:13:19.536845 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d"
Nov 21 17:13:19 crc kubenswrapper[4967]: E1121 17:13:19.537672 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:13:30 crc kubenswrapper[4967]: I1121 17:13:30.539123 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d"
Nov 21 17:13:30 crc kubenswrapper[4967]: E1121 17:13:30.541008 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:13:43 crc kubenswrapper[4967]: I1121 17:13:43.536525 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d"
Nov 21 17:13:43 crc kubenswrapper[4967]: E1121 17:13:43.537347 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:13:48 crc kubenswrapper[4967]: I1121 17:13:48.809953 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="b3d39ab9-f219-4af5-b82c-102fefaff9bc" containerName="galera" probeResult="failure" output="command timed out"
Nov 21 17:13:55 crc kubenswrapper[4967]: I1121 17:13:55.536376 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d"
Nov 21 17:13:55 crc kubenswrapper[4967]: E1121 17:13:55.537120 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:14:10 crc kubenswrapper[4967]: I1121 17:14:10.538007 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d"
Nov 21 17:14:10 crc kubenswrapper[4967]: E1121 17:14:10.538862 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:14:25 crc kubenswrapper[4967]: I1121 17:14:25.537646 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d"
Nov 21 17:14:25 crc kubenswrapper[4967]: E1121 17:14:25.538428 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:14:36 crc kubenswrapper[4967]: I1121 17:14:36.537197 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d"
Nov 21 17:14:36 crc kubenswrapper[4967]: E1121 17:14:36.538482 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:14:49 crc kubenswrapper[4967]: I1121 17:14:49.535936 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d"
Nov 21 17:14:49 crc kubenswrapper[4967]: E1121 17:14:49.536964 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:14:36 crc kubenswrapper[4967]: I1121 17:14:36.537197 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d" Nov 21 17:14:36 crc kubenswrapper[4967]: E1121 17:14:36.538482 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:14:49 crc kubenswrapper[4967]: I1121 17:14:49.535936 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d" Nov 21 17:14:49 crc kubenswrapper[4967]: E1121 17:14:49.536964 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:15:00 crc kubenswrapper[4967]: I1121 17:15:00.192004 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk"] Nov 21 17:15:00 crc kubenswrapper[4967]: E1121 17:15:00.193691 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3241343a-d423-4cbc-927f-52a90a7d5d16" containerName="registry-server" Nov 21 17:15:00 crc kubenswrapper[4967]: I1121 17:15:00.193724 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="3241343a-d423-4cbc-927f-52a90a7d5d16" containerName="registry-server" Nov 21 17:15:00 crc kubenswrapper[4967]: E1121 17:15:00.193796 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3241343a-d423-4cbc-927f-52a90a7d5d16" containerName="extract-content" Nov 21 17:15:00 crc kubenswrapper[4967]: I1121 17:15:00.193814 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="3241343a-d423-4cbc-927f-52a90a7d5d16" containerName="extract-content" Nov 21 17:15:00 crc kubenswrapper[4967]: E1121 17:15:00.193854 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3241343a-d423-4cbc-927f-52a90a7d5d16" containerName="extract-utilities" Nov 21 17:15:00 crc kubenswrapper[4967]: I1121 17:15:00.193867 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="3241343a-d423-4cbc-927f-52a90a7d5d16" containerName="extract-utilities" Nov 21 17:15:00 crc kubenswrapper[4967]: I1121 17:15:00.194326 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="3241343a-d423-4cbc-927f-52a90a7d5d16" containerName="registry-server" Nov 21 17:15:00 crc kubenswrapper[4967]: I1121 17:15:00.195602 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk" Nov 21 17:15:00 crc kubenswrapper[4967]: I1121 17:15:00.199705 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 17:15:00 crc kubenswrapper[4967]: I1121 17:15:00.200253 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 17:15:00 crc kubenswrapper[4967]: I1121 17:15:00.217954 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk"] Nov 21 17:15:00 crc kubenswrapper[4967]: I1121 17:15:00.316352 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/12a3823a-2a4d-41f9-b327-2d0f87951cb5-config-volume\") pod \"collect-profiles-29395755-jdjjk\" (UID: \"12a3823a-2a4d-41f9-b327-2d0f87951cb5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk" Nov 21 17:15:00 crc kubenswrapper[4967]: I1121 17:15:00.316495 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/12a3823a-2a4d-41f9-b327-2d0f87951cb5-secret-volume\") pod \"collect-profiles-29395755-jdjjk\" (UID: \"12a3823a-2a4d-41f9-b327-2d0f87951cb5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk" Nov 21 17:15:00 crc kubenswrapper[4967]: I1121 17:15:00.316543 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8w2f\" (UniqueName: \"kubernetes.io/projected/12a3823a-2a4d-41f9-b327-2d0f87951cb5-kube-api-access-d8w2f\") pod \"collect-profiles-29395755-jdjjk\" (UID: \"12a3823a-2a4d-41f9-b327-2d0f87951cb5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk" Nov 21 17:15:00 crc kubenswrapper[4967]: I1121 17:15:00.420724 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/12a3823a-2a4d-41f9-b327-2d0f87951cb5-config-volume\") pod \"collect-profiles-29395755-jdjjk\" (UID: \"12a3823a-2a4d-41f9-b327-2d0f87951cb5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk" Nov 21 17:15:00 crc kubenswrapper[4967]: I1121 17:15:00.421592 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/12a3823a-2a4d-41f9-b327-2d0f87951cb5-secret-volume\") pod \"collect-profiles-29395755-jdjjk\" (UID: \"12a3823a-2a4d-41f9-b327-2d0f87951cb5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk" Nov 21 17:15:00 crc kubenswrapper[4967]: I1121 17:15:00.421705 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8w2f\" (UniqueName: \"kubernetes.io/projected/12a3823a-2a4d-41f9-b327-2d0f87951cb5-kube-api-access-d8w2f\") pod \"collect-profiles-29395755-jdjjk\" (UID: \"12a3823a-2a4d-41f9-b327-2d0f87951cb5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk" Nov 21 17:15:00 crc kubenswrapper[4967]: I1121 17:15:00.421799 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/12a3823a-2a4d-41f9-b327-2d0f87951cb5-config-volume\") pod 
\"collect-profiles-29395755-jdjjk\" (UID: \"12a3823a-2a4d-41f9-b327-2d0f87951cb5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk" Nov 21 17:15:00 crc kubenswrapper[4967]: I1121 17:15:00.428939 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/12a3823a-2a4d-41f9-b327-2d0f87951cb5-secret-volume\") pod \"collect-profiles-29395755-jdjjk\" (UID: \"12a3823a-2a4d-41f9-b327-2d0f87951cb5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk" Nov 21 17:15:00 crc kubenswrapper[4967]: I1121 17:15:00.438513 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8w2f\" (UniqueName: \"kubernetes.io/projected/12a3823a-2a4d-41f9-b327-2d0f87951cb5-kube-api-access-d8w2f\") pod \"collect-profiles-29395755-jdjjk\" (UID: \"12a3823a-2a4d-41f9-b327-2d0f87951cb5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk" Nov 21 17:15:00 crc kubenswrapper[4967]: I1121 17:15:00.534132 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk" Nov 21 17:15:01 crc kubenswrapper[4967]: I1121 17:15:01.030077 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk"] Nov 21 17:15:01 crc kubenswrapper[4967]: I1121 17:15:01.536760 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d" Nov 21 17:15:01 crc kubenswrapper[4967]: E1121 17:15:01.537514 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:15:02 crc kubenswrapper[4967]: I1121 17:15:02.063486 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk" event={"ID":"12a3823a-2a4d-41f9-b327-2d0f87951cb5","Type":"ContainerStarted","Data":"c33f25dbb70fa93692d0caedbe6c42c9e124bf3a475aeebdbe52fe18b098ce9e"} Nov 21 17:15:02 crc kubenswrapper[4967]: I1121 17:15:02.063973 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk" event={"ID":"12a3823a-2a4d-41f9-b327-2d0f87951cb5","Type":"ContainerStarted","Data":"61c15b8178472e3fa7db38759b553dda1bc8a18776cba9fee50d3c6bacef5a6f"} Nov 21 17:15:02 crc kubenswrapper[4967]: I1121 17:15:02.088105 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk" podStartSLOduration=2.088081787 podStartE2EDuration="2.088081787s" podCreationTimestamp="2025-11-21 17:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 17:15:02.086688147 +0000 UTC m=+5990.345209165" watchObservedRunningTime="2025-11-21 17:15:02.088081787 +0000 UTC m=+5990.346602795" Nov 21 17:15:03 crc kubenswrapper[4967]: I1121 17:15:03.083603 4967 generic.go:334] "Generic (PLEG): container finished" podID="12a3823a-2a4d-41f9-b327-2d0f87951cb5" 
containerID="c33f25dbb70fa93692d0caedbe6c42c9e124bf3a475aeebdbe52fe18b098ce9e" exitCode=0 Nov 21 17:15:03 crc kubenswrapper[4967]: I1121 17:15:03.083693 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk" event={"ID":"12a3823a-2a4d-41f9-b327-2d0f87951cb5","Type":"ContainerDied","Data":"c33f25dbb70fa93692d0caedbe6c42c9e124bf3a475aeebdbe52fe18b098ce9e"} Nov 21 17:15:04 crc kubenswrapper[4967]: I1121 17:15:04.498385 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk" Nov 21 17:15:04 crc kubenswrapper[4967]: I1121 17:15:04.646436 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/12a3823a-2a4d-41f9-b327-2d0f87951cb5-secret-volume\") pod \"12a3823a-2a4d-41f9-b327-2d0f87951cb5\" (UID: \"12a3823a-2a4d-41f9-b327-2d0f87951cb5\") " Nov 21 17:15:04 crc kubenswrapper[4967]: I1121 17:15:04.646567 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/12a3823a-2a4d-41f9-b327-2d0f87951cb5-config-volume\") pod \"12a3823a-2a4d-41f9-b327-2d0f87951cb5\" (UID: \"12a3823a-2a4d-41f9-b327-2d0f87951cb5\") " Nov 21 17:15:04 crc kubenswrapper[4967]: I1121 17:15:04.647431 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d8w2f\" (UniqueName: \"kubernetes.io/projected/12a3823a-2a4d-41f9-b327-2d0f87951cb5-kube-api-access-d8w2f\") pod \"12a3823a-2a4d-41f9-b327-2d0f87951cb5\" (UID: \"12a3823a-2a4d-41f9-b327-2d0f87951cb5\") " Nov 21 17:15:04 crc kubenswrapper[4967]: I1121 17:15:04.647447 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12a3823a-2a4d-41f9-b327-2d0f87951cb5-config-volume" (OuterVolumeSpecName: "config-volume") pod "12a3823a-2a4d-41f9-b327-2d0f87951cb5" (UID: "12a3823a-2a4d-41f9-b327-2d0f87951cb5"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 17:15:04 crc kubenswrapper[4967]: I1121 17:15:04.653468 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12a3823a-2a4d-41f9-b327-2d0f87951cb5-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "12a3823a-2a4d-41f9-b327-2d0f87951cb5" (UID: "12a3823a-2a4d-41f9-b327-2d0f87951cb5"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 17:15:04 crc kubenswrapper[4967]: I1121 17:15:04.654080 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12a3823a-2a4d-41f9-b327-2d0f87951cb5-kube-api-access-d8w2f" (OuterVolumeSpecName: "kube-api-access-d8w2f") pod "12a3823a-2a4d-41f9-b327-2d0f87951cb5" (UID: "12a3823a-2a4d-41f9-b327-2d0f87951cb5"). InnerVolumeSpecName "kube-api-access-d8w2f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:15:04 crc kubenswrapper[4967]: I1121 17:15:04.749473 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d8w2f\" (UniqueName: \"kubernetes.io/projected/12a3823a-2a4d-41f9-b327-2d0f87951cb5-kube-api-access-d8w2f\") on node \"crc\" DevicePath \"\"" Nov 21 17:15:04 crc kubenswrapper[4967]: I1121 17:15:04.749523 4967 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/12a3823a-2a4d-41f9-b327-2d0f87951cb5-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 17:15:04 crc kubenswrapper[4967]: I1121 17:15:04.749537 4967 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/12a3823a-2a4d-41f9-b327-2d0f87951cb5-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 17:15:05 crc kubenswrapper[4967]: I1121 17:15:05.110156 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk" event={"ID":"12a3823a-2a4d-41f9-b327-2d0f87951cb5","Type":"ContainerDied","Data":"61c15b8178472e3fa7db38759b553dda1bc8a18776cba9fee50d3c6bacef5a6f"} Nov 21 17:15:05 crc kubenswrapper[4967]: I1121 17:15:05.110836 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="61c15b8178472e3fa7db38759b553dda1bc8a18776cba9fee50d3c6bacef5a6f" Nov 21 17:15:05 crc kubenswrapper[4967]: I1121 17:15:05.110586 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk" Nov 21 17:15:05 crc kubenswrapper[4967]: I1121 17:15:05.178428 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8"] Nov 21 17:15:05 crc kubenswrapper[4967]: I1121 17:15:05.189982 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395710-ffvb8"] Nov 21 17:15:06 crc kubenswrapper[4967]: I1121 17:15:06.550279 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7739352c-5106-4aec-b99a-2eab9c577078" path="/var/lib/kubelet/pods/7739352c-5106-4aec-b99a-2eab9c577078/volumes" Nov 21 17:15:12 crc kubenswrapper[4967]: I1121 17:15:12.545537 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d" Nov 21 17:15:12 crc kubenswrapper[4967]: E1121 17:15:12.546684 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:15:25 crc kubenswrapper[4967]: I1121 17:15:25.462888 4967 scope.go:117] "RemoveContainer" containerID="8c9255165734ec39ce45f9515dd6ac8d5132bc78f32a54b8865a9d7ac0dc3614" Nov 21 17:15:25 crc kubenswrapper[4967]: I1121 17:15:25.535947 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d" Nov 21 17:15:25 crc kubenswrapper[4967]: E1121 17:15:25.536217 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:15:38 crc kubenswrapper[4967]: I1121 17:15:38.536756 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d" Nov 21 17:15:38 crc kubenswrapper[4967]: E1121 17:15:38.538026 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:15:49 crc kubenswrapper[4967]: I1121 17:15:49.536383 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d" Nov 21 17:15:49 crc kubenswrapper[4967]: E1121 17:15:49.537142 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:16:04 crc kubenswrapper[4967]: I1121 17:16:04.536582 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d" Nov 21 17:16:04 crc kubenswrapper[4967]: E1121 17:16:04.539881 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:16:19 crc kubenswrapper[4967]: I1121 17:16:19.536936 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d" Nov 21 17:16:19 crc kubenswrapper[4967]: E1121 17:16:19.537897 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:16:34 crc kubenswrapper[4967]: I1121 17:16:34.537294 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d" Nov 21 17:16:34 crc kubenswrapper[4967]: E1121 17:16:34.538758 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:16:47 crc kubenswrapper[4967]: I1121 17:16:47.538280 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d" Nov 21 17:16:48 crc kubenswrapper[4967]: I1121 17:16:48.394812 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"2f2eb3bd789553f169bc0896a7408574c03a09e64d1320010f023541ba17ad69"} Nov 21 17:17:00 crc kubenswrapper[4967]: E1121 17:17:00.219499 4967 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.148:49930->38.102.83.148:38425: read tcp 38.102.83.148:49930->38.102.83.148:38425: read: connection reset by peer Nov 21 17:19:16 crc kubenswrapper[4967]: I1121 17:19:16.522735 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:19:16 crc kubenswrapper[4967]: I1121 17:19:16.523640 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:19:46 crc kubenswrapper[4967]: I1121 17:19:46.523027 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:19:46 crc kubenswrapper[4967]: I1121 17:19:46.523778 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:20:16 crc kubenswrapper[4967]: I1121 17:20:16.521887 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:20:16 crc kubenswrapper[4967]: I1121 17:20:16.522464 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:20:16 crc kubenswrapper[4967]: I1121 17:20:16.522518 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 17:20:16 crc kubenswrapper[4967]: I1121 17:20:16.523663 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"2f2eb3bd789553f169bc0896a7408574c03a09e64d1320010f023541ba17ad69"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 17:20:16 crc kubenswrapper[4967]: I1121 17:20:16.523737 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://2f2eb3bd789553f169bc0896a7408574c03a09e64d1320010f023541ba17ad69" gracePeriod=600 Nov 21 17:20:17 crc kubenswrapper[4967]: I1121 17:20:17.006820 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="2f2eb3bd789553f169bc0896a7408574c03a09e64d1320010f023541ba17ad69" exitCode=0 Nov 21 17:20:17 crc kubenswrapper[4967]: I1121 17:20:17.006904 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"2f2eb3bd789553f169bc0896a7408574c03a09e64d1320010f023541ba17ad69"} Nov 21 17:20:17 crc kubenswrapper[4967]: I1121 17:20:17.007271 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b"} Nov 21 17:20:17 crc kubenswrapper[4967]: I1121 17:20:17.007292 4967 scope.go:117] "RemoveContainer" containerID="9469e1c3cb2c2c5df89cd6457ec8bafa975c24f02bebf9352c2015bee448a00d" Nov 21 17:20:44 crc kubenswrapper[4967]: I1121 17:20:44.380426 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wxk8l"] Nov 21 17:20:44 crc kubenswrapper[4967]: E1121 17:20:44.381765 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12a3823a-2a4d-41f9-b327-2d0f87951cb5" containerName="collect-profiles" Nov 21 17:20:44 crc kubenswrapper[4967]: I1121 17:20:44.381784 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="12a3823a-2a4d-41f9-b327-2d0f87951cb5" containerName="collect-profiles" Nov 21 17:20:44 crc kubenswrapper[4967]: I1121 17:20:44.382032 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="12a3823a-2a4d-41f9-b327-2d0f87951cb5" containerName="collect-profiles" Nov 21 17:20:44 crc kubenswrapper[4967]: I1121 17:20:44.386273 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wxk8l" Nov 21 17:20:44 crc kubenswrapper[4967]: I1121 17:20:44.398426 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wxk8l"] Nov 21 17:20:44 crc kubenswrapper[4967]: I1121 17:20:44.512643 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98f6ecd8-bdd3-4b0f-acba-2826d7c3c052-catalog-content\") pod \"redhat-marketplace-wxk8l\" (UID: \"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052\") " pod="openshift-marketplace/redhat-marketplace-wxk8l" Nov 21 17:20:44 crc kubenswrapper[4967]: I1121 17:20:44.513483 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98f6ecd8-bdd3-4b0f-acba-2826d7c3c052-utilities\") pod \"redhat-marketplace-wxk8l\" (UID: \"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052\") " pod="openshift-marketplace/redhat-marketplace-wxk8l" Nov 21 17:20:44 crc kubenswrapper[4967]: I1121 17:20:44.514056 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wp2zm\" (UniqueName: \"kubernetes.io/projected/98f6ecd8-bdd3-4b0f-acba-2826d7c3c052-kube-api-access-wp2zm\") pod \"redhat-marketplace-wxk8l\" (UID: \"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052\") " pod="openshift-marketplace/redhat-marketplace-wxk8l" Nov 21 17:20:44 crc kubenswrapper[4967]: I1121 17:20:44.617367 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98f6ecd8-bdd3-4b0f-acba-2826d7c3c052-catalog-content\") pod \"redhat-marketplace-wxk8l\" (UID: \"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052\") " pod="openshift-marketplace/redhat-marketplace-wxk8l" Nov 21 17:20:44 crc kubenswrapper[4967]: I1121 17:20:44.617492 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98f6ecd8-bdd3-4b0f-acba-2826d7c3c052-utilities\") pod \"redhat-marketplace-wxk8l\" (UID: \"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052\") " pod="openshift-marketplace/redhat-marketplace-wxk8l" Nov 21 17:20:44 crc kubenswrapper[4967]: I1121 17:20:44.617677 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wp2zm\" (UniqueName: \"kubernetes.io/projected/98f6ecd8-bdd3-4b0f-acba-2826d7c3c052-kube-api-access-wp2zm\") pod \"redhat-marketplace-wxk8l\" (UID: \"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052\") " pod="openshift-marketplace/redhat-marketplace-wxk8l" Nov 21 17:20:44 crc kubenswrapper[4967]: I1121 17:20:44.618588 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98f6ecd8-bdd3-4b0f-acba-2826d7c3c052-catalog-content\") pod \"redhat-marketplace-wxk8l\" (UID: \"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052\") " pod="openshift-marketplace/redhat-marketplace-wxk8l" Nov 21 17:20:44 crc kubenswrapper[4967]: I1121 17:20:44.618869 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98f6ecd8-bdd3-4b0f-acba-2826d7c3c052-utilities\") pod \"redhat-marketplace-wxk8l\" (UID: \"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052\") " pod="openshift-marketplace/redhat-marketplace-wxk8l" Nov 21 17:20:44 crc kubenswrapper[4967]: I1121 17:20:44.638764 4967 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-wp2zm\" (UniqueName: \"kubernetes.io/projected/98f6ecd8-bdd3-4b0f-acba-2826d7c3c052-kube-api-access-wp2zm\") pod \"redhat-marketplace-wxk8l\" (UID: \"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052\") " pod="openshift-marketplace/redhat-marketplace-wxk8l" Nov 21 17:20:44 crc kubenswrapper[4967]: I1121 17:20:44.724282 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wxk8l" Nov 21 17:20:45 crc kubenswrapper[4967]: I1121 17:20:45.198248 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wxk8l"] Nov 21 17:20:45 crc kubenswrapper[4967]: I1121 17:20:45.326916 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wxk8l" event={"ID":"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052","Type":"ContainerStarted","Data":"c1347a4aa897aa25851c3f21ce3092a048eb65cc020c977377559a2479178097"} Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.341821 4967 generic.go:334] "Generic (PLEG): container finished" podID="98f6ecd8-bdd3-4b0f-acba-2826d7c3c052" containerID="d123417b6b1b8418a05996ec7819e7f4ab79e7a2d4c2332fb7155fc96f2b33b6" exitCode=0 Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.341898 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wxk8l" event={"ID":"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052","Type":"ContainerDied","Data":"d123417b6b1b8418a05996ec7819e7f4ab79e7a2d4c2332fb7155fc96f2b33b6"} Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.345039 4967 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.760764 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wt9hn"] Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.764583 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wt9hn" Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.773726 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wt9hn"] Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.778968 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b2ca852-7745-4c5d-b784-edbc306dd5cd-utilities\") pod \"certified-operators-wt9hn\" (UID: \"5b2ca852-7745-4c5d-b784-edbc306dd5cd\") " pod="openshift-marketplace/certified-operators-wt9hn" Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.779037 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b2ca852-7745-4c5d-b784-edbc306dd5cd-catalog-content\") pod \"certified-operators-wt9hn\" (UID: \"5b2ca852-7745-4c5d-b784-edbc306dd5cd\") " pod="openshift-marketplace/certified-operators-wt9hn" Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.779118 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnrkg\" (UniqueName: \"kubernetes.io/projected/5b2ca852-7745-4c5d-b784-edbc306dd5cd-kube-api-access-jnrkg\") pod \"certified-operators-wt9hn\" (UID: \"5b2ca852-7745-4c5d-b784-edbc306dd5cd\") " pod="openshift-marketplace/certified-operators-wt9hn" Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.881088 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b2ca852-7745-4c5d-b784-edbc306dd5cd-utilities\") pod \"certified-operators-wt9hn\" (UID: \"5b2ca852-7745-4c5d-b784-edbc306dd5cd\") " pod="openshift-marketplace/certified-operators-wt9hn" Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.881709 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b2ca852-7745-4c5d-b784-edbc306dd5cd-catalog-content\") pod \"certified-operators-wt9hn\" (UID: \"5b2ca852-7745-4c5d-b784-edbc306dd5cd\") " pod="openshift-marketplace/certified-operators-wt9hn" Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.881914 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnrkg\" (UniqueName: \"kubernetes.io/projected/5b2ca852-7745-4c5d-b784-edbc306dd5cd-kube-api-access-jnrkg\") pod \"certified-operators-wt9hn\" (UID: \"5b2ca852-7745-4c5d-b784-edbc306dd5cd\") " pod="openshift-marketplace/certified-operators-wt9hn" Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.882274 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b2ca852-7745-4c5d-b784-edbc306dd5cd-utilities\") pod \"certified-operators-wt9hn\" (UID: \"5b2ca852-7745-4c5d-b784-edbc306dd5cd\") " pod="openshift-marketplace/certified-operators-wt9hn" Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.882306 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b2ca852-7745-4c5d-b784-edbc306dd5cd-catalog-content\") pod \"certified-operators-wt9hn\" (UID: \"5b2ca852-7745-4c5d-b784-edbc306dd5cd\") " pod="openshift-marketplace/certified-operators-wt9hn" Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.900967 4967 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-jnrkg\" (UniqueName: \"kubernetes.io/projected/5b2ca852-7745-4c5d-b784-edbc306dd5cd-kube-api-access-jnrkg\") pod \"certified-operators-wt9hn\" (UID: \"5b2ca852-7745-4c5d-b784-edbc306dd5cd\") " pod="openshift-marketplace/certified-operators-wt9hn" Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.950939 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wf855"] Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.957271 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wf855" Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.973774 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wf855"] Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.984955 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f-utilities\") pod \"community-operators-wf855\" (UID: \"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f\") " pod="openshift-marketplace/community-operators-wf855" Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.985001 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jccsv\" (UniqueName: \"kubernetes.io/projected/5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f-kube-api-access-jccsv\") pod \"community-operators-wf855\" (UID: \"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f\") " pod="openshift-marketplace/community-operators-wf855" Nov 21 17:20:46 crc kubenswrapper[4967]: I1121 17:20:46.985140 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f-catalog-content\") pod \"community-operators-wf855\" (UID: \"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f\") " pod="openshift-marketplace/community-operators-wf855" Nov 21 17:20:47 crc kubenswrapper[4967]: I1121 17:20:47.088479 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f-utilities\") pod \"community-operators-wf855\" (UID: \"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f\") " pod="openshift-marketplace/community-operators-wf855" Nov 21 17:20:47 crc kubenswrapper[4967]: I1121 17:20:47.088566 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jccsv\" (UniqueName: \"kubernetes.io/projected/5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f-kube-api-access-jccsv\") pod \"community-operators-wf855\" (UID: \"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f\") " pod="openshift-marketplace/community-operators-wf855" Nov 21 17:20:47 crc kubenswrapper[4967]: I1121 17:20:47.088660 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f-catalog-content\") pod \"community-operators-wf855\" (UID: \"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f\") " pod="openshift-marketplace/community-operators-wf855" Nov 21 17:20:47 crc kubenswrapper[4967]: I1121 17:20:47.089047 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f-utilities\") pod \"community-operators-wf855\" 
(UID: \"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f\") " pod="openshift-marketplace/community-operators-wf855" Nov 21 17:20:47 crc kubenswrapper[4967]: I1121 17:20:47.089206 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f-catalog-content\") pod \"community-operators-wf855\" (UID: \"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f\") " pod="openshift-marketplace/community-operators-wf855" Nov 21 17:20:47 crc kubenswrapper[4967]: I1121 17:20:47.089331 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wt9hn" Nov 21 17:20:47 crc kubenswrapper[4967]: I1121 17:20:47.115018 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jccsv\" (UniqueName: \"kubernetes.io/projected/5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f-kube-api-access-jccsv\") pod \"community-operators-wf855\" (UID: \"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f\") " pod="openshift-marketplace/community-operators-wf855" Nov 21 17:20:47 crc kubenswrapper[4967]: I1121 17:20:47.317936 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wf855" Nov 21 17:20:47 crc kubenswrapper[4967]: I1121 17:20:47.697130 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wt9hn"] Nov 21 17:20:47 crc kubenswrapper[4967]: I1121 17:20:47.987073 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wf855"] Nov 21 17:20:48 crc kubenswrapper[4967]: W1121 17:20:48.047256 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5a3e5a20_20c4_4f08_a7bd_49be8c2d8e2f.slice/crio-d005c7bd5977f3de475a1f62730674d5dafbb9d9d4fe65c44e3dd12b1577114c WatchSource:0}: Error finding container d005c7bd5977f3de475a1f62730674d5dafbb9d9d4fe65c44e3dd12b1577114c: Status 404 returned error can't find the container with id d005c7bd5977f3de475a1f62730674d5dafbb9d9d4fe65c44e3dd12b1577114c Nov 21 17:20:48 crc kubenswrapper[4967]: E1121 17:20:48.286819 4967 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98f6ecd8_bdd3_4b0f_acba_2826d7c3c052.slice/crio-b45f0d6e3b26a84c04c606c13f7f0de442bff0a7d718a287faee78910955327e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98f6ecd8_bdd3_4b0f_acba_2826d7c3c052.slice/crio-conmon-b45f0d6e3b26a84c04c606c13f7f0de442bff0a7d718a287faee78910955327e.scope\": RecentStats: unable to find data in memory cache]" Nov 21 17:20:48 crc kubenswrapper[4967]: I1121 17:20:48.404801 4967 generic.go:334] "Generic (PLEG): container finished" podID="98f6ecd8-bdd3-4b0f-acba-2826d7c3c052" containerID="b45f0d6e3b26a84c04c606c13f7f0de442bff0a7d718a287faee78910955327e" exitCode=0 Nov 21 17:20:48 crc kubenswrapper[4967]: I1121 17:20:48.404871 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wxk8l" event={"ID":"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052","Type":"ContainerDied","Data":"b45f0d6e3b26a84c04c606c13f7f0de442bff0a7d718a287faee78910955327e"} Nov 21 17:20:48 crc kubenswrapper[4967]: I1121 17:20:48.407768 4967 generic.go:334] "Generic (PLEG): container finished" 
podID="5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f" containerID="7c3991ab8171407c256c85cb9efabefad310e585deaa2c6ba069e22fc580dd0b" exitCode=0 Nov 21 17:20:48 crc kubenswrapper[4967]: I1121 17:20:48.407808 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wf855" event={"ID":"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f","Type":"ContainerDied","Data":"7c3991ab8171407c256c85cb9efabefad310e585deaa2c6ba069e22fc580dd0b"} Nov 21 17:20:48 crc kubenswrapper[4967]: I1121 17:20:48.407878 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wf855" event={"ID":"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f","Type":"ContainerStarted","Data":"d005c7bd5977f3de475a1f62730674d5dafbb9d9d4fe65c44e3dd12b1577114c"} Nov 21 17:20:48 crc kubenswrapper[4967]: I1121 17:20:48.410443 4967 generic.go:334] "Generic (PLEG): container finished" podID="5b2ca852-7745-4c5d-b784-edbc306dd5cd" containerID="37bec3597cf86271eb2fe2ac07bda710b2416d19d357bcb4270264b98270dbf1" exitCode=0 Nov 21 17:20:48 crc kubenswrapper[4967]: I1121 17:20:48.410470 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wt9hn" event={"ID":"5b2ca852-7745-4c5d-b784-edbc306dd5cd","Type":"ContainerDied","Data":"37bec3597cf86271eb2fe2ac07bda710b2416d19d357bcb4270264b98270dbf1"} Nov 21 17:20:48 crc kubenswrapper[4967]: I1121 17:20:48.410497 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wt9hn" event={"ID":"5b2ca852-7745-4c5d-b784-edbc306dd5cd","Type":"ContainerStarted","Data":"506586aecb33734fb2d1199bac3e9274ae3b7d70f9566102d448cb786abd1bcb"} Nov 21 17:20:49 crc kubenswrapper[4967]: I1121 17:20:49.422194 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wxk8l" event={"ID":"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052","Type":"ContainerStarted","Data":"16bd8420dd09d9c7c4f91971f1c4821181091cfc925b905063fe5ba7ee879a31"} Nov 21 17:20:49 crc kubenswrapper[4967]: I1121 17:20:49.424450 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wf855" event={"ID":"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f","Type":"ContainerStarted","Data":"a241fdbe78b40644cb12fb6ed0d31bb1e1d98a74d69c67d0bd44735954269b25"} Nov 21 17:20:49 crc kubenswrapper[4967]: I1121 17:20:49.426739 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wt9hn" event={"ID":"5b2ca852-7745-4c5d-b784-edbc306dd5cd","Type":"ContainerStarted","Data":"4fa7598ddb4e4252faa982c2a351b23110f3a124ee1c20f5098bd23cab2187e8"} Nov 21 17:20:49 crc kubenswrapper[4967]: I1121 17:20:49.449924 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wxk8l" podStartSLOduration=2.9460159729999997 podStartE2EDuration="5.449897759s" podCreationTimestamp="2025-11-21 17:20:44 +0000 UTC" firstStartedPulling="2025-11-21 17:20:46.344803963 +0000 UTC m=+6334.603324971" lastFinishedPulling="2025-11-21 17:20:48.848685749 +0000 UTC m=+6337.107206757" observedRunningTime="2025-11-21 17:20:49.441619612 +0000 UTC m=+6337.700140620" watchObservedRunningTime="2025-11-21 17:20:49.449897759 +0000 UTC m=+6337.708418767" Nov 21 17:20:53 crc kubenswrapper[4967]: I1121 17:20:53.474808 4967 generic.go:334] "Generic (PLEG): container finished" podID="5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f" containerID="a241fdbe78b40644cb12fb6ed0d31bb1e1d98a74d69c67d0bd44735954269b25" exitCode=0 Nov 
21 17:20:53 crc kubenswrapper[4967]: I1121 17:20:53.474894 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wf855" event={"ID":"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f","Type":"ContainerDied","Data":"a241fdbe78b40644cb12fb6ed0d31bb1e1d98a74d69c67d0bd44735954269b25"} Nov 21 17:20:53 crc kubenswrapper[4967]: I1121 17:20:53.481346 4967 generic.go:334] "Generic (PLEG): container finished" podID="5b2ca852-7745-4c5d-b784-edbc306dd5cd" containerID="4fa7598ddb4e4252faa982c2a351b23110f3a124ee1c20f5098bd23cab2187e8" exitCode=0 Nov 21 17:20:53 crc kubenswrapper[4967]: I1121 17:20:53.481388 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wt9hn" event={"ID":"5b2ca852-7745-4c5d-b784-edbc306dd5cd","Type":"ContainerDied","Data":"4fa7598ddb4e4252faa982c2a351b23110f3a124ee1c20f5098bd23cab2187e8"} Nov 21 17:20:54 crc kubenswrapper[4967]: I1121 17:20:54.492434 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wf855" event={"ID":"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f","Type":"ContainerStarted","Data":"e3d966b9db60bf1ecefa006399cb7e861796eb9a4b333994e2d6346de6e018eb"} Nov 21 17:20:54 crc kubenswrapper[4967]: I1121 17:20:54.495478 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wt9hn" event={"ID":"5b2ca852-7745-4c5d-b784-edbc306dd5cd","Type":"ContainerStarted","Data":"0d09d130c82bc604232fee97cb52f4d41e3685179351dc210da79dd040c13d23"} Nov 21 17:20:54 crc kubenswrapper[4967]: I1121 17:20:54.523018 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wf855" podStartSLOduration=3.04283955 podStartE2EDuration="8.522995363s" podCreationTimestamp="2025-11-21 17:20:46 +0000 UTC" firstStartedPulling="2025-11-21 17:20:48.410529709 +0000 UTC m=+6336.669050717" lastFinishedPulling="2025-11-21 17:20:53.890685512 +0000 UTC m=+6342.149206530" observedRunningTime="2025-11-21 17:20:54.511099492 +0000 UTC m=+6342.769620520" watchObservedRunningTime="2025-11-21 17:20:54.522995363 +0000 UTC m=+6342.781516361" Nov 21 17:20:54 crc kubenswrapper[4967]: I1121 17:20:54.532377 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wt9hn" podStartSLOduration=3.064736008 podStartE2EDuration="8.532354871s" podCreationTimestamp="2025-11-21 17:20:46 +0000 UTC" firstStartedPulling="2025-11-21 17:20:48.415963915 +0000 UTC m=+6336.674484933" lastFinishedPulling="2025-11-21 17:20:53.883582788 +0000 UTC m=+6342.142103796" observedRunningTime="2025-11-21 17:20:54.530027394 +0000 UTC m=+6342.788548402" watchObservedRunningTime="2025-11-21 17:20:54.532354871 +0000 UTC m=+6342.790875879" Nov 21 17:20:54 crc kubenswrapper[4967]: I1121 17:20:54.724850 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wxk8l" Nov 21 17:20:54 crc kubenswrapper[4967]: I1121 17:20:54.724920 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-wxk8l" Nov 21 17:20:54 crc kubenswrapper[4967]: I1121 17:20:54.782650 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wxk8l" Nov 21 17:20:55 crc kubenswrapper[4967]: I1121 17:20:55.566741 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/redhat-marketplace-wxk8l" Nov 21 17:20:57 crc kubenswrapper[4967]: I1121 17:20:57.089752 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wt9hn" Nov 21 17:20:57 crc kubenswrapper[4967]: I1121 17:20:57.089842 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wt9hn" Nov 21 17:20:57 crc kubenswrapper[4967]: I1121 17:20:57.138619 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wt9hn" Nov 21 17:20:57 crc kubenswrapper[4967]: I1121 17:20:57.318648 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wf855" Nov 21 17:20:57 crc kubenswrapper[4967]: I1121 17:20:57.318694 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wf855" Nov 21 17:20:57 crc kubenswrapper[4967]: I1121 17:20:57.337785 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wxk8l"] Nov 21 17:20:57 crc kubenswrapper[4967]: I1121 17:20:57.366096 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wf855" Nov 21 17:20:57 crc kubenswrapper[4967]: I1121 17:20:57.524924 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wxk8l" podUID="98f6ecd8-bdd3-4b0f-acba-2826d7c3c052" containerName="registry-server" containerID="cri-o://16bd8420dd09d9c7c4f91971f1c4821181091cfc925b905063fe5ba7ee879a31" gracePeriod=2 Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.137852 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wxk8l" Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.266185 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98f6ecd8-bdd3-4b0f-acba-2826d7c3c052-utilities\") pod \"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052\" (UID: \"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052\") " Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.266435 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98f6ecd8-bdd3-4b0f-acba-2826d7c3c052-catalog-content\") pod \"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052\" (UID: \"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052\") " Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.266464 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wp2zm\" (UniqueName: \"kubernetes.io/projected/98f6ecd8-bdd3-4b0f-acba-2826d7c3c052-kube-api-access-wp2zm\") pod \"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052\" (UID: \"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052\") " Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.266841 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98f6ecd8-bdd3-4b0f-acba-2826d7c3c052-utilities" (OuterVolumeSpecName: "utilities") pod "98f6ecd8-bdd3-4b0f-acba-2826d7c3c052" (UID: "98f6ecd8-bdd3-4b0f-acba-2826d7c3c052"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.267628 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98f6ecd8-bdd3-4b0f-acba-2826d7c3c052-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.272770 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98f6ecd8-bdd3-4b0f-acba-2826d7c3c052-kube-api-access-wp2zm" (OuterVolumeSpecName: "kube-api-access-wp2zm") pod "98f6ecd8-bdd3-4b0f-acba-2826d7c3c052" (UID: "98f6ecd8-bdd3-4b0f-acba-2826d7c3c052"). InnerVolumeSpecName "kube-api-access-wp2zm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.282665 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98f6ecd8-bdd3-4b0f-acba-2826d7c3c052-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "98f6ecd8-bdd3-4b0f-acba-2826d7c3c052" (UID: "98f6ecd8-bdd3-4b0f-acba-2826d7c3c052"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.369724 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98f6ecd8-bdd3-4b0f-acba-2826d7c3c052-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.369770 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wp2zm\" (UniqueName: \"kubernetes.io/projected/98f6ecd8-bdd3-4b0f-acba-2826d7c3c052-kube-api-access-wp2zm\") on node \"crc\" DevicePath \"\"" Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.537050 4967 generic.go:334] "Generic (PLEG): container finished" podID="98f6ecd8-bdd3-4b0f-acba-2826d7c3c052" containerID="16bd8420dd09d9c7c4f91971f1c4821181091cfc925b905063fe5ba7ee879a31" exitCode=0 Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.537154 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wxk8l" Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.550054 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wxk8l" event={"ID":"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052","Type":"ContainerDied","Data":"16bd8420dd09d9c7c4f91971f1c4821181091cfc925b905063fe5ba7ee879a31"} Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.550101 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wxk8l" event={"ID":"98f6ecd8-bdd3-4b0f-acba-2826d7c3c052","Type":"ContainerDied","Data":"c1347a4aa897aa25851c3f21ce3092a048eb65cc020c977377559a2479178097"} Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.550146 4967 scope.go:117] "RemoveContainer" containerID="16bd8420dd09d9c7c4f91971f1c4821181091cfc925b905063fe5ba7ee879a31" Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.584291 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wxk8l"] Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.589361 4967 scope.go:117] "RemoveContainer" containerID="b45f0d6e3b26a84c04c606c13f7f0de442bff0a7d718a287faee78910955327e" Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.601763 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-wxk8l"] Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.622049 4967 scope.go:117] "RemoveContainer" containerID="d123417b6b1b8418a05996ec7819e7f4ab79e7a2d4c2332fb7155fc96f2b33b6" Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.667083 4967 scope.go:117] "RemoveContainer" containerID="16bd8420dd09d9c7c4f91971f1c4821181091cfc925b905063fe5ba7ee879a31" Nov 21 17:20:58 crc kubenswrapper[4967]: E1121 17:20:58.667589 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16bd8420dd09d9c7c4f91971f1c4821181091cfc925b905063fe5ba7ee879a31\": container with ID starting with 16bd8420dd09d9c7c4f91971f1c4821181091cfc925b905063fe5ba7ee879a31 not found: ID does not exist" containerID="16bd8420dd09d9c7c4f91971f1c4821181091cfc925b905063fe5ba7ee879a31" Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.667735 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16bd8420dd09d9c7c4f91971f1c4821181091cfc925b905063fe5ba7ee879a31"} err="failed to get container status \"16bd8420dd09d9c7c4f91971f1c4821181091cfc925b905063fe5ba7ee879a31\": rpc error: code = NotFound desc = could not find container \"16bd8420dd09d9c7c4f91971f1c4821181091cfc925b905063fe5ba7ee879a31\": container with ID starting with 16bd8420dd09d9c7c4f91971f1c4821181091cfc925b905063fe5ba7ee879a31 not found: ID does not exist" Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.667850 4967 scope.go:117] "RemoveContainer" containerID="b45f0d6e3b26a84c04c606c13f7f0de442bff0a7d718a287faee78910955327e" Nov 21 17:20:58 crc kubenswrapper[4967]: E1121 17:20:58.668610 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b45f0d6e3b26a84c04c606c13f7f0de442bff0a7d718a287faee78910955327e\": container with ID starting with b45f0d6e3b26a84c04c606c13f7f0de442bff0a7d718a287faee78910955327e not found: ID does not exist" containerID="b45f0d6e3b26a84c04c606c13f7f0de442bff0a7d718a287faee78910955327e" Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.668682 4967 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b45f0d6e3b26a84c04c606c13f7f0de442bff0a7d718a287faee78910955327e"} err="failed to get container status \"b45f0d6e3b26a84c04c606c13f7f0de442bff0a7d718a287faee78910955327e\": rpc error: code = NotFound desc = could not find container \"b45f0d6e3b26a84c04c606c13f7f0de442bff0a7d718a287faee78910955327e\": container with ID starting with b45f0d6e3b26a84c04c606c13f7f0de442bff0a7d718a287faee78910955327e not found: ID does not exist" Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.668739 4967 scope.go:117] "RemoveContainer" containerID="d123417b6b1b8418a05996ec7819e7f4ab79e7a2d4c2332fb7155fc96f2b33b6" Nov 21 17:20:58 crc kubenswrapper[4967]: E1121 17:20:58.669134 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d123417b6b1b8418a05996ec7819e7f4ab79e7a2d4c2332fb7155fc96f2b33b6\": container with ID starting with d123417b6b1b8418a05996ec7819e7f4ab79e7a2d4c2332fb7155fc96f2b33b6 not found: ID does not exist" containerID="d123417b6b1b8418a05996ec7819e7f4ab79e7a2d4c2332fb7155fc96f2b33b6" Nov 21 17:20:58 crc kubenswrapper[4967]: I1121 17:20:58.669232 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d123417b6b1b8418a05996ec7819e7f4ab79e7a2d4c2332fb7155fc96f2b33b6"} err="failed to get container status \"d123417b6b1b8418a05996ec7819e7f4ab79e7a2d4c2332fb7155fc96f2b33b6\": rpc error: code = NotFound desc = could not find container \"d123417b6b1b8418a05996ec7819e7f4ab79e7a2d4c2332fb7155fc96f2b33b6\": container with ID starting with d123417b6b1b8418a05996ec7819e7f4ab79e7a2d4c2332fb7155fc96f2b33b6 not found: ID does not exist" Nov 21 17:21:00 crc kubenswrapper[4967]: I1121 17:21:00.550058 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98f6ecd8-bdd3-4b0f-acba-2826d7c3c052" path="/var/lib/kubelet/pods/98f6ecd8-bdd3-4b0f-acba-2826d7c3c052/volumes" Nov 21 17:21:07 crc kubenswrapper[4967]: I1121 17:21:07.149626 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wt9hn" Nov 21 17:21:07 crc kubenswrapper[4967]: I1121 17:21:07.216335 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wt9hn"] Nov 21 17:21:07 crc kubenswrapper[4967]: I1121 17:21:07.368790 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wf855" Nov 21 17:21:07 crc kubenswrapper[4967]: I1121 17:21:07.631934 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wt9hn" podUID="5b2ca852-7745-4c5d-b784-edbc306dd5cd" containerName="registry-server" containerID="cri-o://0d09d130c82bc604232fee97cb52f4d41e3685179351dc210da79dd040c13d23" gracePeriod=2 Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.188903 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wt9hn" Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.343977 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b2ca852-7745-4c5d-b784-edbc306dd5cd-utilities\") pod \"5b2ca852-7745-4c5d-b784-edbc306dd5cd\" (UID: \"5b2ca852-7745-4c5d-b784-edbc306dd5cd\") " Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.344048 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jnrkg\" (UniqueName: \"kubernetes.io/projected/5b2ca852-7745-4c5d-b784-edbc306dd5cd-kube-api-access-jnrkg\") pod \"5b2ca852-7745-4c5d-b784-edbc306dd5cd\" (UID: \"5b2ca852-7745-4c5d-b784-edbc306dd5cd\") " Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.344136 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b2ca852-7745-4c5d-b784-edbc306dd5cd-catalog-content\") pod \"5b2ca852-7745-4c5d-b784-edbc306dd5cd\" (UID: \"5b2ca852-7745-4c5d-b784-edbc306dd5cd\") " Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.345228 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b2ca852-7745-4c5d-b784-edbc306dd5cd-utilities" (OuterVolumeSpecName: "utilities") pod "5b2ca852-7745-4c5d-b784-edbc306dd5cd" (UID: "5b2ca852-7745-4c5d-b784-edbc306dd5cd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.355258 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b2ca852-7745-4c5d-b784-edbc306dd5cd-kube-api-access-jnrkg" (OuterVolumeSpecName: "kube-api-access-jnrkg") pod "5b2ca852-7745-4c5d-b784-edbc306dd5cd" (UID: "5b2ca852-7745-4c5d-b784-edbc306dd5cd"). InnerVolumeSpecName "kube-api-access-jnrkg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.414374 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b2ca852-7745-4c5d-b784-edbc306dd5cd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5b2ca852-7745-4c5d-b784-edbc306dd5cd" (UID: "5b2ca852-7745-4c5d-b784-edbc306dd5cd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.448358 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b2ca852-7745-4c5d-b784-edbc306dd5cd-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.448396 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jnrkg\" (UniqueName: \"kubernetes.io/projected/5b2ca852-7745-4c5d-b784-edbc306dd5cd-kube-api-access-jnrkg\") on node \"crc\" DevicePath \"\"" Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.448407 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b2ca852-7745-4c5d-b784-edbc306dd5cd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.645899 4967 generic.go:334] "Generic (PLEG): container finished" podID="5b2ca852-7745-4c5d-b784-edbc306dd5cd" containerID="0d09d130c82bc604232fee97cb52f4d41e3685179351dc210da79dd040c13d23" exitCode=0 Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.645982 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wt9hn" Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.645981 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wt9hn" event={"ID":"5b2ca852-7745-4c5d-b784-edbc306dd5cd","Type":"ContainerDied","Data":"0d09d130c82bc604232fee97cb52f4d41e3685179351dc210da79dd040c13d23"} Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.646407 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wt9hn" event={"ID":"5b2ca852-7745-4c5d-b784-edbc306dd5cd","Type":"ContainerDied","Data":"506586aecb33734fb2d1199bac3e9274ae3b7d70f9566102d448cb786abd1bcb"} Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.646442 4967 scope.go:117] "RemoveContainer" containerID="0d09d130c82bc604232fee97cb52f4d41e3685179351dc210da79dd040c13d23" Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.674045 4967 scope.go:117] "RemoveContainer" containerID="4fa7598ddb4e4252faa982c2a351b23110f3a124ee1c20f5098bd23cab2187e8" Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.697874 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wt9hn"] Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.731719 4967 scope.go:117] "RemoveContainer" containerID="37bec3597cf86271eb2fe2ac07bda710b2416d19d357bcb4270264b98270dbf1" Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.733216 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wt9hn"] Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.779752 4967 scope.go:117] "RemoveContainer" containerID="0d09d130c82bc604232fee97cb52f4d41e3685179351dc210da79dd040c13d23" Nov 21 17:21:08 crc kubenswrapper[4967]: E1121 17:21:08.780197 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d09d130c82bc604232fee97cb52f4d41e3685179351dc210da79dd040c13d23\": container with ID starting with 0d09d130c82bc604232fee97cb52f4d41e3685179351dc210da79dd040c13d23 not found: ID does not exist" containerID="0d09d130c82bc604232fee97cb52f4d41e3685179351dc210da79dd040c13d23" Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.780230 
4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d09d130c82bc604232fee97cb52f4d41e3685179351dc210da79dd040c13d23"} err="failed to get container status \"0d09d130c82bc604232fee97cb52f4d41e3685179351dc210da79dd040c13d23\": rpc error: code = NotFound desc = could not find container \"0d09d130c82bc604232fee97cb52f4d41e3685179351dc210da79dd040c13d23\": container with ID starting with 0d09d130c82bc604232fee97cb52f4d41e3685179351dc210da79dd040c13d23 not found: ID does not exist" Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.780252 4967 scope.go:117] "RemoveContainer" containerID="4fa7598ddb4e4252faa982c2a351b23110f3a124ee1c20f5098bd23cab2187e8" Nov 21 17:21:08 crc kubenswrapper[4967]: E1121 17:21:08.780846 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4fa7598ddb4e4252faa982c2a351b23110f3a124ee1c20f5098bd23cab2187e8\": container with ID starting with 4fa7598ddb4e4252faa982c2a351b23110f3a124ee1c20f5098bd23cab2187e8 not found: ID does not exist" containerID="4fa7598ddb4e4252faa982c2a351b23110f3a124ee1c20f5098bd23cab2187e8" Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.780865 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fa7598ddb4e4252faa982c2a351b23110f3a124ee1c20f5098bd23cab2187e8"} err="failed to get container status \"4fa7598ddb4e4252faa982c2a351b23110f3a124ee1c20f5098bd23cab2187e8\": rpc error: code = NotFound desc = could not find container \"4fa7598ddb4e4252faa982c2a351b23110f3a124ee1c20f5098bd23cab2187e8\": container with ID starting with 4fa7598ddb4e4252faa982c2a351b23110f3a124ee1c20f5098bd23cab2187e8 not found: ID does not exist" Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.780878 4967 scope.go:117] "RemoveContainer" containerID="37bec3597cf86271eb2fe2ac07bda710b2416d19d357bcb4270264b98270dbf1" Nov 21 17:21:08 crc kubenswrapper[4967]: E1121 17:21:08.781185 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37bec3597cf86271eb2fe2ac07bda710b2416d19d357bcb4270264b98270dbf1\": container with ID starting with 37bec3597cf86271eb2fe2ac07bda710b2416d19d357bcb4270264b98270dbf1 not found: ID does not exist" containerID="37bec3597cf86271eb2fe2ac07bda710b2416d19d357bcb4270264b98270dbf1" Nov 21 17:21:08 crc kubenswrapper[4967]: I1121 17:21:08.781213 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37bec3597cf86271eb2fe2ac07bda710b2416d19d357bcb4270264b98270dbf1"} err="failed to get container status \"37bec3597cf86271eb2fe2ac07bda710b2416d19d357bcb4270264b98270dbf1\": rpc error: code = NotFound desc = could not find container \"37bec3597cf86271eb2fe2ac07bda710b2416d19d357bcb4270264b98270dbf1\": container with ID starting with 37bec3597cf86271eb2fe2ac07bda710b2416d19d357bcb4270264b98270dbf1 not found: ID does not exist" Nov 21 17:21:09 crc kubenswrapper[4967]: I1121 17:21:09.190852 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wf855"] Nov 21 17:21:09 crc kubenswrapper[4967]: I1121 17:21:09.191137 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wf855" podUID="5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f" containerName="registry-server" containerID="cri-o://e3d966b9db60bf1ecefa006399cb7e861796eb9a4b333994e2d6346de6e018eb" gracePeriod=2 Nov 21 
17:21:09 crc kubenswrapper[4967]: I1121 17:21:09.659712 4967 generic.go:334] "Generic (PLEG): container finished" podID="5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f" containerID="e3d966b9db60bf1ecefa006399cb7e861796eb9a4b333994e2d6346de6e018eb" exitCode=0 Nov 21 17:21:09 crc kubenswrapper[4967]: I1121 17:21:09.659779 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wf855" event={"ID":"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f","Type":"ContainerDied","Data":"e3d966b9db60bf1ecefa006399cb7e861796eb9a4b333994e2d6346de6e018eb"} Nov 21 17:21:09 crc kubenswrapper[4967]: I1121 17:21:09.660120 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wf855" event={"ID":"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f","Type":"ContainerDied","Data":"d005c7bd5977f3de475a1f62730674d5dafbb9d9d4fe65c44e3dd12b1577114c"} Nov 21 17:21:09 crc kubenswrapper[4967]: I1121 17:21:09.660138 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d005c7bd5977f3de475a1f62730674d5dafbb9d9d4fe65c44e3dd12b1577114c" Nov 21 17:21:09 crc kubenswrapper[4967]: I1121 17:21:09.706084 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wf855" Nov 21 17:21:09 crc kubenswrapper[4967]: I1121 17:21:09.779230 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f-catalog-content\") pod \"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f\" (UID: \"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f\") " Nov 21 17:21:09 crc kubenswrapper[4967]: I1121 17:21:09.779303 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jccsv\" (UniqueName: \"kubernetes.io/projected/5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f-kube-api-access-jccsv\") pod \"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f\" (UID: \"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f\") " Nov 21 17:21:09 crc kubenswrapper[4967]: I1121 17:21:09.779452 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f-utilities\") pod \"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f\" (UID: \"5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f\") " Nov 21 17:21:09 crc kubenswrapper[4967]: I1121 17:21:09.780476 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f-utilities" (OuterVolumeSpecName: "utilities") pod "5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f" (UID: "5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:21:09 crc kubenswrapper[4967]: I1121 17:21:09.789428 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f-kube-api-access-jccsv" (OuterVolumeSpecName: "kube-api-access-jccsv") pod "5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f" (UID: "5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f"). InnerVolumeSpecName "kube-api-access-jccsv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:21:09 crc kubenswrapper[4967]: I1121 17:21:09.840197 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f" (UID: "5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:21:09 crc kubenswrapper[4967]: I1121 17:21:09.882281 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 17:21:09 crc kubenswrapper[4967]: I1121 17:21:09.882354 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jccsv\" (UniqueName: \"kubernetes.io/projected/5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f-kube-api-access-jccsv\") on node \"crc\" DevicePath \"\"" Nov 21 17:21:09 crc kubenswrapper[4967]: I1121 17:21:09.882369 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 17:21:10 crc kubenswrapper[4967]: I1121 17:21:10.550495 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b2ca852-7745-4c5d-b784-edbc306dd5cd" path="/var/lib/kubelet/pods/5b2ca852-7745-4c5d-b784-edbc306dd5cd/volumes" Nov 21 17:21:10 crc kubenswrapper[4967]: I1121 17:21:10.670918 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wf855" Nov 21 17:21:10 crc kubenswrapper[4967]: I1121 17:21:10.698619 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wf855"] Nov 21 17:21:10 crc kubenswrapper[4967]: I1121 17:21:10.709344 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wf855"] Nov 21 17:21:12 crc kubenswrapper[4967]: I1121 17:21:12.550764 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f" path="/var/lib/kubelet/pods/5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f/volumes" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.113430 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-c6q9n"] Nov 21 17:21:20 crc kubenswrapper[4967]: E1121 17:21:20.114582 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f" containerName="extract-utilities" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.114597 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f" containerName="extract-utilities" Nov 21 17:21:20 crc kubenswrapper[4967]: E1121 17:21:20.114621 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b2ca852-7745-4c5d-b784-edbc306dd5cd" containerName="registry-server" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.114627 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b2ca852-7745-4c5d-b784-edbc306dd5cd" containerName="registry-server" Nov 21 17:21:20 crc kubenswrapper[4967]: E1121 17:21:20.114640 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98f6ecd8-bdd3-4b0f-acba-2826d7c3c052" containerName="extract-utilities" Nov 21 17:21:20 crc 
kubenswrapper[4967]: I1121 17:21:20.114646 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="98f6ecd8-bdd3-4b0f-acba-2826d7c3c052" containerName="extract-utilities" Nov 21 17:21:20 crc kubenswrapper[4967]: E1121 17:21:20.114659 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b2ca852-7745-4c5d-b784-edbc306dd5cd" containerName="extract-content" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.114665 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b2ca852-7745-4c5d-b784-edbc306dd5cd" containerName="extract-content" Nov 21 17:21:20 crc kubenswrapper[4967]: E1121 17:21:20.114681 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f" containerName="extract-content" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.114689 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f" containerName="extract-content" Nov 21 17:21:20 crc kubenswrapper[4967]: E1121 17:21:20.114702 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b2ca852-7745-4c5d-b784-edbc306dd5cd" containerName="extract-utilities" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.114708 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b2ca852-7745-4c5d-b784-edbc306dd5cd" containerName="extract-utilities" Nov 21 17:21:20 crc kubenswrapper[4967]: E1121 17:21:20.114719 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98f6ecd8-bdd3-4b0f-acba-2826d7c3c052" containerName="registry-server" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.114725 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="98f6ecd8-bdd3-4b0f-acba-2826d7c3c052" containerName="registry-server" Nov 21 17:21:20 crc kubenswrapper[4967]: E1121 17:21:20.114746 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f" containerName="registry-server" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.114751 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f" containerName="registry-server" Nov 21 17:21:20 crc kubenswrapper[4967]: E1121 17:21:20.114761 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98f6ecd8-bdd3-4b0f-acba-2826d7c3c052" containerName="extract-content" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.114766 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="98f6ecd8-bdd3-4b0f-acba-2826d7c3c052" containerName="extract-content" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.115007 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="98f6ecd8-bdd3-4b0f-acba-2826d7c3c052" containerName="registry-server" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.115028 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b2ca852-7745-4c5d-b784-edbc306dd5cd" containerName="registry-server" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.115040 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a3e5a20-20c4-4f08-a7bd-49be8c2d8e2f" containerName="registry-server" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.117092 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c6q9n" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.133745 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c6q9n"] Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.233080 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-catalog-content\") pod \"redhat-operators-c6q9n\" (UID: \"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb\") " pod="openshift-marketplace/redhat-operators-c6q9n" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.233155 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-utilities\") pod \"redhat-operators-c6q9n\" (UID: \"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb\") " pod="openshift-marketplace/redhat-operators-c6q9n" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.233708 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59cn6\" (UniqueName: \"kubernetes.io/projected/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-kube-api-access-59cn6\") pod \"redhat-operators-c6q9n\" (UID: \"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb\") " pod="openshift-marketplace/redhat-operators-c6q9n" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.336543 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59cn6\" (UniqueName: \"kubernetes.io/projected/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-kube-api-access-59cn6\") pod \"redhat-operators-c6q9n\" (UID: \"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb\") " pod="openshift-marketplace/redhat-operators-c6q9n" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.336717 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-catalog-content\") pod \"redhat-operators-c6q9n\" (UID: \"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb\") " pod="openshift-marketplace/redhat-operators-c6q9n" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.336775 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-utilities\") pod \"redhat-operators-c6q9n\" (UID: \"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb\") " pod="openshift-marketplace/redhat-operators-c6q9n" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.337231 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-catalog-content\") pod \"redhat-operators-c6q9n\" (UID: \"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb\") " pod="openshift-marketplace/redhat-operators-c6q9n" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.337337 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-utilities\") pod \"redhat-operators-c6q9n\" (UID: \"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb\") " pod="openshift-marketplace/redhat-operators-c6q9n" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.374373 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-59cn6\" (UniqueName: \"kubernetes.io/projected/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-kube-api-access-59cn6\") pod \"redhat-operators-c6q9n\" (UID: \"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb\") " pod="openshift-marketplace/redhat-operators-c6q9n" Nov 21 17:21:20 crc kubenswrapper[4967]: I1121 17:21:20.449050 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c6q9n" Nov 21 17:21:21 crc kubenswrapper[4967]: I1121 17:21:21.010875 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c6q9n"] Nov 21 17:21:21 crc kubenswrapper[4967]: I1121 17:21:21.795333 4967 generic.go:334] "Generic (PLEG): container finished" podID="b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb" containerID="00f0e0734477e519d93d9a749685f8e285f9b76bee2360765376d27f980118a8" exitCode=0 Nov 21 17:21:21 crc kubenswrapper[4967]: I1121 17:21:21.795421 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6q9n" event={"ID":"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb","Type":"ContainerDied","Data":"00f0e0734477e519d93d9a749685f8e285f9b76bee2360765376d27f980118a8"} Nov 21 17:21:21 crc kubenswrapper[4967]: I1121 17:21:21.795680 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6q9n" event={"ID":"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb","Type":"ContainerStarted","Data":"57b6e163c30e48e99f76a9c83397384b22d4a74cb84018bfe9557810a0eea70c"} Nov 21 17:21:23 crc kubenswrapper[4967]: I1121 17:21:23.826388 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6q9n" event={"ID":"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb","Type":"ContainerStarted","Data":"32378bf8cf1f19da82f01eae38d1d9b746803d51396274e2042cf25813403bd8"} Nov 21 17:21:29 crc kubenswrapper[4967]: I1121 17:21:29.899117 4967 generic.go:334] "Generic (PLEG): container finished" podID="b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb" containerID="32378bf8cf1f19da82f01eae38d1d9b746803d51396274e2042cf25813403bd8" exitCode=0 Nov 21 17:21:29 crc kubenswrapper[4967]: I1121 17:21:29.899200 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6q9n" event={"ID":"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb","Type":"ContainerDied","Data":"32378bf8cf1f19da82f01eae38d1d9b746803d51396274e2042cf25813403bd8"} Nov 21 17:21:36 crc kubenswrapper[4967]: I1121 17:21:36.979030 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6q9n" event={"ID":"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb","Type":"ContainerStarted","Data":"a6cfc83ba5a698372f22487b0352d1e093b51cda047d2e2291ca838587515ba7"} Nov 21 17:21:39 crc kubenswrapper[4967]: I1121 17:21:39.032879 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-c6q9n" podStartSLOduration=5.716538081 podStartE2EDuration="19.032859546s" podCreationTimestamp="2025-11-21 17:21:20 +0000 UTC" firstStartedPulling="2025-11-21 17:21:21.79886168 +0000 UTC m=+6370.057382728" lastFinishedPulling="2025-11-21 17:21:35.115183185 +0000 UTC m=+6383.373704193" observedRunningTime="2025-11-21 17:21:39.025077623 +0000 UTC m=+6387.283598671" watchObservedRunningTime="2025-11-21 17:21:39.032859546 +0000 UTC m=+6387.291380554" Nov 21 17:21:40 crc kubenswrapper[4967]: I1121 17:21:40.449604 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-c6q9n" 
Nov 21 17:21:40 crc kubenswrapper[4967]: I1121 17:21:40.450649 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-c6q9n"
Nov 21 17:21:41 crc kubenswrapper[4967]: I1121 17:21:41.501408 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c6q9n" podUID="b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb" containerName="registry-server" probeResult="failure" output=<
Nov 21 17:21:41 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s
Nov 21 17:21:41 crc kubenswrapper[4967]: >
Nov 21 17:21:50 crc kubenswrapper[4967]: I1121 17:21:50.507161 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-c6q9n"
Nov 21 17:21:50 crc kubenswrapper[4967]: I1121 17:21:50.568136 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-c6q9n"
Nov 21 17:21:51 crc kubenswrapper[4967]: I1121 17:21:51.311918 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c6q9n"]
Nov 21 17:21:52 crc kubenswrapper[4967]: I1121 17:21:52.154691 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-c6q9n" podUID="b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb" containerName="registry-server" containerID="cri-o://a6cfc83ba5a698372f22487b0352d1e093b51cda047d2e2291ca838587515ba7" gracePeriod=2
Nov 21 17:21:53 crc kubenswrapper[4967]: I1121 17:21:53.171229 4967 generic.go:334] "Generic (PLEG): container finished" podID="b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb" containerID="a6cfc83ba5a698372f22487b0352d1e093b51cda047d2e2291ca838587515ba7" exitCode=0
Nov 21 17:21:53 crc kubenswrapper[4967]: I1121 17:21:53.171295 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6q9n" event={"ID":"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb","Type":"ContainerDied","Data":"a6cfc83ba5a698372f22487b0352d1e093b51cda047d2e2291ca838587515ba7"}
Nov 21 17:21:53 crc kubenswrapper[4967]: I1121 17:21:53.313601 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c6q9n"
Nov 21 17:21:53 crc kubenswrapper[4967]: I1121 17:21:53.388705 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-catalog-content\") pod \"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb\" (UID: \"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb\") "
Nov 21 17:21:53 crc kubenswrapper[4967]: I1121 17:21:53.388880 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-utilities\") pod \"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb\" (UID: \"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb\") "
Nov 21 17:21:53 crc kubenswrapper[4967]: I1121 17:21:53.389135 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-59cn6\" (UniqueName: \"kubernetes.io/projected/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-kube-api-access-59cn6\") pod \"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb\" (UID: \"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb\") "
Nov 21 17:21:53 crc kubenswrapper[4967]: I1121 17:21:53.399695 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-utilities" (OuterVolumeSpecName: "utilities") pod "b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb" (UID: "b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 17:21:53 crc kubenswrapper[4967]: I1121 17:21:53.408568 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-kube-api-access-59cn6" (OuterVolumeSpecName: "kube-api-access-59cn6") pod "b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb" (UID: "b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb"). InnerVolumeSpecName "kube-api-access-59cn6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 17:21:53 crc kubenswrapper[4967]: I1121 17:21:53.491693 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb" (UID: "b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 17:21:53 crc kubenswrapper[4967]: I1121 17:21:53.492075 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-catalog-content\") pod \"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb\" (UID: \"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb\") "
Nov 21 17:21:53 crc kubenswrapper[4967]: I1121 17:21:53.493260 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-59cn6\" (UniqueName: \"kubernetes.io/projected/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-kube-api-access-59cn6\") on node \"crc\" DevicePath \"\""
Nov 21 17:21:53 crc kubenswrapper[4967]: I1121 17:21:53.493279 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-utilities\") on node \"crc\" DevicePath \"\""
Nov 21 17:21:53 crc kubenswrapper[4967]: W1121 17:21:53.493358 4967 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb/volumes/kubernetes.io~empty-dir/catalog-content
Nov 21 17:21:53 crc kubenswrapper[4967]: I1121 17:21:53.493399 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb" (UID: "b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 17:21:53 crc kubenswrapper[4967]: I1121 17:21:53.596439 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 21 17:21:54 crc kubenswrapper[4967]: I1121 17:21:54.184494 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c6q9n" event={"ID":"b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb","Type":"ContainerDied","Data":"57b6e163c30e48e99f76a9c83397384b22d4a74cb84018bfe9557810a0eea70c"}
Nov 21 17:21:54 crc kubenswrapper[4967]: I1121 17:21:54.184573 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c6q9n"
Nov 21 17:21:54 crc kubenswrapper[4967]: I1121 17:21:54.184857 4967 scope.go:117] "RemoveContainer" containerID="a6cfc83ba5a698372f22487b0352d1e093b51cda047d2e2291ca838587515ba7"
Nov 21 17:21:54 crc kubenswrapper[4967]: I1121 17:21:54.218090 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c6q9n"]
Nov 21 17:21:54 crc kubenswrapper[4967]: I1121 17:21:54.229709 4967 scope.go:117] "RemoveContainer" containerID="32378bf8cf1f19da82f01eae38d1d9b746803d51396274e2042cf25813403bd8"
Nov 21 17:21:54 crc kubenswrapper[4967]: I1121 17:21:54.232616 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-c6q9n"]
Nov 21 17:21:54 crc kubenswrapper[4967]: I1121 17:21:54.271219 4967 scope.go:117] "RemoveContainer" containerID="00f0e0734477e519d93d9a749685f8e285f9b76bee2360765376d27f980118a8"
Nov 21 17:21:54 crc kubenswrapper[4967]: I1121 17:21:54.550207 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb" path="/var/lib/kubelet/pods/b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb/volumes"
Nov 21 17:22:16 crc kubenswrapper[4967]: I1121 17:22:16.521978 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 21 17:22:16 crc kubenswrapper[4967]: I1121 17:22:16.522522 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 21 17:22:46 crc kubenswrapper[4967]: I1121 17:22:46.523288 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 21 17:22:46 crc kubenswrapper[4967]: I1121 17:22:46.523959 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 21 17:22:57 crc kubenswrapper[4967]: I1121 17:22:57.723389 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-klvmk"]
Nov 21 17:22:57 crc kubenswrapper[4967]: E1121 17:22:57.724649 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb" containerName="extract-utilities"
Nov 21 17:22:57 crc kubenswrapper[4967]: I1121 17:22:57.724667 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb" containerName="extract-utilities"
Nov 21 17:22:57 crc kubenswrapper[4967]: E1121 17:22:57.724688 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb" containerName="registry-server"
Nov 21 17:22:57 crc kubenswrapper[4967]: I1121 17:22:57.724696 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb" containerName="registry-server"
Nov 21 17:22:57 crc kubenswrapper[4967]: E1121 17:22:57.724770 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb" containerName="extract-content"
Nov 21 17:22:57 crc kubenswrapper[4967]: I1121 17:22:57.724778 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb" containerName="extract-content"
Nov 21 17:22:57 crc kubenswrapper[4967]: I1121 17:22:57.725044 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2634aa2-89cd-4dee-9f7e-b958fcf4f5bb" containerName="registry-server"
Nov 21 17:22:57 crc kubenswrapper[4967]: I1121 17:22:57.726137 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-klvmk"
Nov 21 17:22:57 crc kubenswrapper[4967]: I1121 17:22:57.745831 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-klvmk"]
Nov 21 17:22:57 crc kubenswrapper[4967]: I1121 17:22:57.792992 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36a77740-439b-489e-ae8b-d4f675be9f14-config-data\") pod \"heat-db-sync-klvmk\" (UID: \"36a77740-439b-489e-ae8b-d4f675be9f14\") " pod="openstack/heat-db-sync-klvmk"
Nov 21 17:22:57 crc kubenswrapper[4967]: I1121 17:22:57.793229 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36a77740-439b-489e-ae8b-d4f675be9f14-combined-ca-bundle\") pod \"heat-db-sync-klvmk\" (UID: \"36a77740-439b-489e-ae8b-d4f675be9f14\") " pod="openstack/heat-db-sync-klvmk"
Nov 21 17:22:57 crc kubenswrapper[4967]: I1121 17:22:57.793282 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lprn4\" (UniqueName: \"kubernetes.io/projected/36a77740-439b-489e-ae8b-d4f675be9f14-kube-api-access-lprn4\") pod \"heat-db-sync-klvmk\" (UID: \"36a77740-439b-489e-ae8b-d4f675be9f14\") " pod="openstack/heat-db-sync-klvmk"
Nov 21 17:22:57 crc kubenswrapper[4967]: I1121 17:22:57.897108 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36a77740-439b-489e-ae8b-d4f675be9f14-config-data\") pod \"heat-db-sync-klvmk\" (UID: \"36a77740-439b-489e-ae8b-d4f675be9f14\") " pod="openstack/heat-db-sync-klvmk"
Nov 21 17:22:57 crc kubenswrapper[4967]: I1121 17:22:57.897365 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36a77740-439b-489e-ae8b-d4f675be9f14-combined-ca-bundle\") pod \"heat-db-sync-klvmk\" (UID: \"36a77740-439b-489e-ae8b-d4f675be9f14\") " pod="openstack/heat-db-sync-klvmk"
Nov 21 17:22:57 crc kubenswrapper[4967]: I1121 17:22:57.897402 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lprn4\" (UniqueName: \"kubernetes.io/projected/36a77740-439b-489e-ae8b-d4f675be9f14-kube-api-access-lprn4\") pod \"heat-db-sync-klvmk\" (UID: \"36a77740-439b-489e-ae8b-d4f675be9f14\") " pod="openstack/heat-db-sync-klvmk"
Nov 21 17:22:57 crc kubenswrapper[4967]: I1121 17:22:57.905390 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36a77740-439b-489e-ae8b-d4f675be9f14-config-data\") pod \"heat-db-sync-klvmk\" (UID: \"36a77740-439b-489e-ae8b-d4f675be9f14\") " pod="openstack/heat-db-sync-klvmk"
Nov 21 17:22:57 crc kubenswrapper[4967]: I1121 17:22:57.906177 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36a77740-439b-489e-ae8b-d4f675be9f14-combined-ca-bundle\") pod \"heat-db-sync-klvmk\" (UID: \"36a77740-439b-489e-ae8b-d4f675be9f14\") " pod="openstack/heat-db-sync-klvmk"
Nov 21 17:22:57 crc kubenswrapper[4967]: I1121 17:22:57.916947 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lprn4\" (UniqueName: \"kubernetes.io/projected/36a77740-439b-489e-ae8b-d4f675be9f14-kube-api-access-lprn4\") pod \"heat-db-sync-klvmk\" (UID: \"36a77740-439b-489e-ae8b-d4f675be9f14\") " pod="openstack/heat-db-sync-klvmk"
Nov 21 17:22:57 crc kubenswrapper[4967]: I1121 17:22:57.924088 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-qkxlx"]
Nov 21 17:22:57 crc kubenswrapper[4967]: I1121 17:22:57.927122 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-qkxlx"
Nov 21 17:22:57 crc kubenswrapper[4967]: I1121 17:22:57.932408 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Nov 21 17:22:58 crc kubenswrapper[4967]: I1121 17:22:57.999911 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67bjq\" (UniqueName: \"kubernetes.io/projected/6d018f81-8caf-43f2-8ce6-5a799aacde0d-kube-api-access-67bjq\") pod \"aodh-db-sync-qkxlx\" (UID: \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\") " pod="openstack/aodh-db-sync-qkxlx"
Nov 21 17:22:58 crc kubenswrapper[4967]: I1121 17:22:57.999992 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d018f81-8caf-43f2-8ce6-5a799aacde0d-scripts\") pod \"aodh-db-sync-qkxlx\" (UID: \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\") " pod="openstack/aodh-db-sync-qkxlx"
Nov 21 17:22:58 crc kubenswrapper[4967]: I1121 17:22:58.000087 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d018f81-8caf-43f2-8ce6-5a799aacde0d-combined-ca-bundle\") pod \"aodh-db-sync-qkxlx\" (UID: \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\") " pod="openstack/aodh-db-sync-qkxlx"
Nov 21 17:22:58 crc kubenswrapper[4967]: I1121 17:22:58.000252 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d018f81-8caf-43f2-8ce6-5a799aacde0d-config-data\") pod \"aodh-db-sync-qkxlx\" (UID: \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\") " pod="openstack/aodh-db-sync-qkxlx"
Nov 21 17:22:58 crc kubenswrapper[4967]: I1121 17:22:58.014772 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-qkxlx"]
Nov 21 17:22:58 crc kubenswrapper[4967]: I1121 17:22:58.046905 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-klvmk"
Nov 21 17:22:58 crc kubenswrapper[4967]: I1121 17:22:58.102528 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d018f81-8caf-43f2-8ce6-5a799aacde0d-combined-ca-bundle\") pod \"aodh-db-sync-qkxlx\" (UID: \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\") " pod="openstack/aodh-db-sync-qkxlx"
Nov 21 17:22:58 crc kubenswrapper[4967]: I1121 17:22:58.102908 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d018f81-8caf-43f2-8ce6-5a799aacde0d-config-data\") pod \"aodh-db-sync-qkxlx\" (UID: \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\") " pod="openstack/aodh-db-sync-qkxlx"
Nov 21 17:22:58 crc kubenswrapper[4967]: I1121 17:22:58.102984 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67bjq\" (UniqueName: \"kubernetes.io/projected/6d018f81-8caf-43f2-8ce6-5a799aacde0d-kube-api-access-67bjq\") pod \"aodh-db-sync-qkxlx\" (UID: \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\") " pod="openstack/aodh-db-sync-qkxlx"
Nov 21 17:22:58 crc kubenswrapper[4967]: I1121 17:22:58.103005 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d018f81-8caf-43f2-8ce6-5a799aacde0d-scripts\") pod \"aodh-db-sync-qkxlx\" (UID: \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\") " pod="openstack/aodh-db-sync-qkxlx"
Nov 21 17:22:58 crc kubenswrapper[4967]: I1121 17:22:58.106187 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d018f81-8caf-43f2-8ce6-5a799aacde0d-scripts\") pod \"aodh-db-sync-qkxlx\" (UID: \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\") " pod="openstack/aodh-db-sync-qkxlx"
Nov 21 17:22:58 crc kubenswrapper[4967]: I1121 17:22:58.108060 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d018f81-8caf-43f2-8ce6-5a799aacde0d-combined-ca-bundle\") pod \"aodh-db-sync-qkxlx\" (UID: \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\") " pod="openstack/aodh-db-sync-qkxlx"
Nov 21 17:22:58 crc kubenswrapper[4967]: I1121 17:22:58.111270 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d018f81-8caf-43f2-8ce6-5a799aacde0d-config-data\") pod \"aodh-db-sync-qkxlx\" (UID: \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\") " pod="openstack/aodh-db-sync-qkxlx"
Nov 21 17:22:58 crc kubenswrapper[4967]: I1121 17:22:58.118776 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67bjq\" (UniqueName: \"kubernetes.io/projected/6d018f81-8caf-43f2-8ce6-5a799aacde0d-kube-api-access-67bjq\") pod \"aodh-db-sync-qkxlx\" (UID: \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\") " pod="openstack/aodh-db-sync-qkxlx"
Nov 21 17:22:58 crc kubenswrapper[4967]: I1121 17:22:58.367929 4967 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/aodh-db-sync-qkxlx" Nov 21 17:22:58 crc kubenswrapper[4967]: I1121 17:22:58.578671 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-klvmk"] Nov 21 17:22:58 crc kubenswrapper[4967]: I1121 17:22:58.940001 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-qkxlx"] Nov 21 17:22:58 crc kubenswrapper[4967]: I1121 17:22:58.973335 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-klvmk" event={"ID":"36a77740-439b-489e-ae8b-d4f675be9f14","Type":"ContainerStarted","Data":"f40a0dc08b0cf83a754c19973dd5960b802981bcee9d12c5d1c5ac714f36819c"} Nov 21 17:22:59 crc kubenswrapper[4967]: I1121 17:22:59.991702 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-qkxlx" event={"ID":"6d018f81-8caf-43f2-8ce6-5a799aacde0d","Type":"ContainerStarted","Data":"14e0cbd3e532b1770cfce700a9ab4642fcd0ff8f616d57232171d99bbd131d6b"} Nov 21 17:23:00 crc kubenswrapper[4967]: I1121 17:23:00.658611 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 17:23:00 crc kubenswrapper[4967]: I1121 17:23:00.658998 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="de37571f-76bd-4e9b-9141-cf2c056bab84" containerName="ceilometer-central-agent" containerID="cri-o://52a72174fca64a58a58e14f6ef18e868cc0cec3a48d2df09a6a1d1a3c06c2923" gracePeriod=30 Nov 21 17:23:00 crc kubenswrapper[4967]: I1121 17:23:00.659145 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="de37571f-76bd-4e9b-9141-cf2c056bab84" containerName="ceilometer-notification-agent" containerID="cri-o://b0fd7712595dfef1c9e78fd9fbdc879956b6b2a0c8170e1053c255c28e81b21c" gracePeriod=30 Nov 21 17:23:00 crc kubenswrapper[4967]: I1121 17:23:00.659134 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="de37571f-76bd-4e9b-9141-cf2c056bab84" containerName="proxy-httpd" containerID="cri-o://04d9448d726db4685d1d24e640d1e76a6c53b587de322fb62bd815532f09fbd1" gracePeriod=30 Nov 21 17:23:00 crc kubenswrapper[4967]: I1121 17:23:00.659098 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="de37571f-76bd-4e9b-9141-cf2c056bab84" containerName="sg-core" containerID="cri-o://4a60542b861b693f099a7157093eb814f0b805256dd297d8df1cca920b2fcda9" gracePeriod=30 Nov 21 17:23:01 crc kubenswrapper[4967]: I1121 17:23:01.020326 4967 generic.go:334] "Generic (PLEG): container finished" podID="de37571f-76bd-4e9b-9141-cf2c056bab84" containerID="4a60542b861b693f099a7157093eb814f0b805256dd297d8df1cca920b2fcda9" exitCode=2 Nov 21 17:23:01 crc kubenswrapper[4967]: I1121 17:23:01.020443 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de37571f-76bd-4e9b-9141-cf2c056bab84","Type":"ContainerDied","Data":"4a60542b861b693f099a7157093eb814f0b805256dd297d8df1cca920b2fcda9"} Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.063630 4967 generic.go:334] "Generic (PLEG): container finished" podID="de37571f-76bd-4e9b-9141-cf2c056bab84" containerID="04d9448d726db4685d1d24e640d1e76a6c53b587de322fb62bd815532f09fbd1" exitCode=0 Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.063932 4967 generic.go:334] "Generic (PLEG): container finished" podID="de37571f-76bd-4e9b-9141-cf2c056bab84" 
containerID="b0fd7712595dfef1c9e78fd9fbdc879956b6b2a0c8170e1053c255c28e81b21c" exitCode=0 Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.063736 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de37571f-76bd-4e9b-9141-cf2c056bab84","Type":"ContainerDied","Data":"04d9448d726db4685d1d24e640d1e76a6c53b587de322fb62bd815532f09fbd1"} Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.064000 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de37571f-76bd-4e9b-9141-cf2c056bab84","Type":"ContainerDied","Data":"b0fd7712595dfef1c9e78fd9fbdc879956b6b2a0c8170e1053c255c28e81b21c"} Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.064025 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de37571f-76bd-4e9b-9141-cf2c056bab84","Type":"ContainerDied","Data":"52a72174fca64a58a58e14f6ef18e868cc0cec3a48d2df09a6a1d1a3c06c2923"} Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.063943 4967 generic.go:334] "Generic (PLEG): container finished" podID="de37571f-76bd-4e9b-9141-cf2c056bab84" containerID="52a72174fca64a58a58e14f6ef18e868cc0cec3a48d2df09a6a1d1a3c06c2923" exitCode=0 Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.296857 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.335551 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-combined-ca-bundle\") pod \"de37571f-76bd-4e9b-9141-cf2c056bab84\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.335627 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-sg-core-conf-yaml\") pod \"de37571f-76bd-4e9b-9141-cf2c056bab84\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.335652 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de37571f-76bd-4e9b-9141-cf2c056bab84-run-httpd\") pod \"de37571f-76bd-4e9b-9141-cf2c056bab84\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.335685 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-config-data\") pod \"de37571f-76bd-4e9b-9141-cf2c056bab84\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.335827 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-scripts\") pod \"de37571f-76bd-4e9b-9141-cf2c056bab84\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.335981 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-ceilometer-tls-certs\") pod \"de37571f-76bd-4e9b-9141-cf2c056bab84\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " Nov 21 17:23:02 crc kubenswrapper[4967]: 
I1121 17:23:02.336030 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de37571f-76bd-4e9b-9141-cf2c056bab84-log-httpd\") pod \"de37571f-76bd-4e9b-9141-cf2c056bab84\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.336074 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vth8c\" (UniqueName: \"kubernetes.io/projected/de37571f-76bd-4e9b-9141-cf2c056bab84-kube-api-access-vth8c\") pod \"de37571f-76bd-4e9b-9141-cf2c056bab84\" (UID: \"de37571f-76bd-4e9b-9141-cf2c056bab84\") " Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.339676 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de37571f-76bd-4e9b-9141-cf2c056bab84-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "de37571f-76bd-4e9b-9141-cf2c056bab84" (UID: "de37571f-76bd-4e9b-9141-cf2c056bab84"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.350124 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de37571f-76bd-4e9b-9141-cf2c056bab84-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "de37571f-76bd-4e9b-9141-cf2c056bab84" (UID: "de37571f-76bd-4e9b-9141-cf2c056bab84"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.351204 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-scripts" (OuterVolumeSpecName: "scripts") pod "de37571f-76bd-4e9b-9141-cf2c056bab84" (UID: "de37571f-76bd-4e9b-9141-cf2c056bab84"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.358198 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de37571f-76bd-4e9b-9141-cf2c056bab84-kube-api-access-vth8c" (OuterVolumeSpecName: "kube-api-access-vth8c") pod "de37571f-76bd-4e9b-9141-cf2c056bab84" (UID: "de37571f-76bd-4e9b-9141-cf2c056bab84"). InnerVolumeSpecName "kube-api-access-vth8c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.414316 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "de37571f-76bd-4e9b-9141-cf2c056bab84" (UID: "de37571f-76bd-4e9b-9141-cf2c056bab84"). InnerVolumeSpecName "sg-core-conf-yaml". 
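
Teardown runs the mount path in reverse: UnmountVolume.TearDown per volume, "Volume detached" once the reconciler's state for that volume is empty, and eventually a "Cleaned up orphaned pod volumes dir" message when nothing is left under the pod's volumes directory. A read-only diagnostic sketch for checking that directory on the node, using the pod UID from the log; purely illustrative, not part of the kubelet:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// Lists whatever volume plugins still hold state for a pod UID under
// /var/lib/kubelet/pods. An empty or missing tree is what the kubelet's
// "Cleaned up orphaned pod volumes dir" message reports.
func main() {
	podUID := "de37571f-76bd-4e9b-9141-cf2c056bab84" // UID from the log
	root := filepath.Join("/var/lib/kubelet/pods", podUID, "volumes")
	entries, err := os.ReadDir(root)
	if err != nil {
		fmt.Println("no volume dir left:", err)
		return
	}
	for _, e := range entries {
		fmt.Println(e.Name()) // e.g. kubernetes.io~secret, kubernetes.io~empty-dir
	}
}
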
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.442665 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.442700 4967 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de37571f-76bd-4e9b-9141-cf2c056bab84-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.442710 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vth8c\" (UniqueName: \"kubernetes.io/projected/de37571f-76bd-4e9b-9141-cf2c056bab84-kube-api-access-vth8c\") on node \"crc\" DevicePath \"\"" Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.442728 4967 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.442736 4967 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de37571f-76bd-4e9b-9141-cf2c056bab84-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.541171 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "de37571f-76bd-4e9b-9141-cf2c056bab84" (UID: "de37571f-76bd-4e9b-9141-cf2c056bab84"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.546179 4967 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.557357 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "de37571f-76bd-4e9b-9141-cf2c056bab84" (UID: "de37571f-76bd-4e9b-9141-cf2c056bab84"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.594711 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-config-data" (OuterVolumeSpecName: "config-data") pod "de37571f-76bd-4e9b-9141-cf2c056bab84" (UID: "de37571f-76bd-4e9b-9141-cf2c056bab84"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.656546 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 17:23:02 crc kubenswrapper[4967]: I1121 17:23:02.656593 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de37571f-76bd-4e9b-9141-cf2c056bab84-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.170321 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de37571f-76bd-4e9b-9141-cf2c056bab84","Type":"ContainerDied","Data":"cb62ba1d8ce6af2a850e82974092886deb35a115a37b9b4685856fbedd1049ef"} Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.170404 4967 scope.go:117] "RemoveContainer" containerID="04d9448d726db4685d1d24e640d1e76a6c53b587de322fb62bd815532f09fbd1" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.170694 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.259186 4967 scope.go:117] "RemoveContainer" containerID="4a60542b861b693f099a7157093eb814f0b805256dd297d8df1cca920b2fcda9" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.365533 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.397860 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.453783 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 17:23:03 crc kubenswrapper[4967]: E1121 17:23:03.454576 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de37571f-76bd-4e9b-9141-cf2c056bab84" containerName="sg-core" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.454591 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="de37571f-76bd-4e9b-9141-cf2c056bab84" containerName="sg-core" Nov 21 17:23:03 crc kubenswrapper[4967]: E1121 17:23:03.454608 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de37571f-76bd-4e9b-9141-cf2c056bab84" containerName="ceilometer-central-agent" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.454613 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="de37571f-76bd-4e9b-9141-cf2c056bab84" containerName="ceilometer-central-agent" Nov 21 17:23:03 crc kubenswrapper[4967]: E1121 17:23:03.454649 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de37571f-76bd-4e9b-9141-cf2c056bab84" containerName="proxy-httpd" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.454655 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="de37571f-76bd-4e9b-9141-cf2c056bab84" containerName="proxy-httpd" Nov 21 17:23:03 crc kubenswrapper[4967]: E1121 17:23:03.454681 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de37571f-76bd-4e9b-9141-cf2c056bab84" containerName="ceilometer-notification-agent" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.454688 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="de37571f-76bd-4e9b-9141-cf2c056bab84" containerName="ceilometer-notification-agent" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.454934 4967 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="de37571f-76bd-4e9b-9141-cf2c056bab84" containerName="ceilometer-central-agent" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.454955 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="de37571f-76bd-4e9b-9141-cf2c056bab84" containerName="ceilometer-notification-agent" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.454968 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="de37571f-76bd-4e9b-9141-cf2c056bab84" containerName="proxy-httpd" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.454978 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="de37571f-76bd-4e9b-9141-cf2c056bab84" containerName="sg-core" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.459513 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.464067 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.464390 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.468862 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.490738 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.535578 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-scripts\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.535962 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.536102 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvtpw\" (UniqueName: \"kubernetes.io/projected/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-kube-api-access-mvtpw\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.536181 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.536290 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.536402 4967 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-config-data\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.537225 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-run-httpd\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.537441 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-log-httpd\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.550158 4967 scope.go:117] "RemoveContainer" containerID="b0fd7712595dfef1c9e78fd9fbdc879956b6b2a0c8170e1053c255c28e81b21c" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.612724 4967 scope.go:117] "RemoveContainer" containerID="52a72174fca64a58a58e14f6ef18e868cc0cec3a48d2df09a6a1d1a3c06c2923" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.639598 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-config-data\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.639665 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-run-httpd\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.640132 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-log-httpd\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.640417 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-scripts\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.640567 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.640680 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvtpw\" (UniqueName: \"kubernetes.io/projected/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-kube-api-access-mvtpw\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.640734 4967 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.640846 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.641454 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-run-httpd\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.642086 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-log-httpd\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.648406 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.649257 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.651669 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.667155 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-scripts\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.671324 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-config-data\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.675074 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvtpw\" (UniqueName: \"kubernetes.io/projected/be34fe3c-c5f0-4eaf-a694-02b5b5bf343b-kube-api-access-mvtpw\") pod \"ceilometer-0\" (UID: \"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b\") " pod="openstack/ceilometer-0" Nov 21 17:23:03 crc kubenswrapper[4967]: I1121 17:23:03.804684 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 17:23:04 crc kubenswrapper[4967]: I1121 17:23:04.437220 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 17:23:04 crc kubenswrapper[4967]: I1121 17:23:04.556830 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de37571f-76bd-4e9b-9141-cf2c056bab84" path="/var/lib/kubelet/pods/de37571f-76bd-4e9b-9141-cf2c056bab84/volumes" Nov 21 17:23:04 crc kubenswrapper[4967]: I1121 17:23:04.945766 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 21 17:23:04 crc kubenswrapper[4967]: I1121 17:23:04.947966 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 21 17:23:04 crc kubenswrapper[4967]: I1121 17:23:04.951529 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 21 17:23:04 crc kubenswrapper[4967]: I1121 17:23:04.951538 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 21 17:23:04 crc kubenswrapper[4967]: I1121 17:23:04.951699 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-nc5kp" Nov 21 17:23:04 crc kubenswrapper[4967]: I1121 17:23:04.960686 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 21 17:23:04 crc kubenswrapper[4967]: I1121 17:23:04.985431 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.084372 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.084419 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lhzb\" (UniqueName: \"kubernetes.io/projected/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-kube-api-access-7lhzb\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.084493 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.084530 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.085212 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.085270 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.085399 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-config-data\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.085420 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.085526 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.188944 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.189010 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7lhzb\" (UniqueName: \"kubernetes.io/projected/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-kube-api-access-7lhzb\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.189066 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.189105 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.189245 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.189268 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.189353 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.189386 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-config-data\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.189425 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.190296 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.190363 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.190602 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.192199 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.193132 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-config-data\") pod \"tempest-tests-tempest\" (UID: 
\"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.200743 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.200813 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.211479 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.214119 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lhzb\" (UniqueName: \"kubernetes.io/projected/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-kube-api-access-7lhzb\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.238019 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b","Type":"ContainerStarted","Data":"aed54c63ad8e820e70d520320092ab2213dc99c8683c8f7713d0673e42be169f"} Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.250532 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"tempest-tests-tempest\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") " pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.288769 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 21 17:23:05 crc kubenswrapper[4967]: I1121 17:23:05.858428 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 21 17:23:05 crc kubenswrapper[4967]: W1121 17:23:05.880427 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode74fdffd_f5c7_4be6_8d37_5d9e07704aaa.slice/crio-7316c789a90ba2142d5b83b145351b50f85170944668732493fe8107710b49b7 WatchSource:0}: Error finding container 7316c789a90ba2142d5b83b145351b50f85170944668732493fe8107710b49b7: Status 404 returned error can't find the container with id 7316c789a90ba2142d5b83b145351b50f85170944668732493fe8107710b49b7 Nov 21 17:23:06 crc kubenswrapper[4967]: I1121 17:23:06.264538 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa","Type":"ContainerStarted","Data":"7316c789a90ba2142d5b83b145351b50f85170944668732493fe8107710b49b7"} Nov 21 17:23:16 crc kubenswrapper[4967]: I1121 17:23:16.522213 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:23:16 crc kubenswrapper[4967]: I1121 17:23:16.523199 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:23:16 crc kubenswrapper[4967]: I1121 17:23:16.523277 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 17:23:16 crc kubenswrapper[4967]: I1121 17:23:16.524366 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 17:23:16 crc kubenswrapper[4967]: I1121 17:23:16.524442 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b" gracePeriod=600 Nov 21 17:23:17 crc kubenswrapper[4967]: I1121 17:23:17.461529 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b" exitCode=0 Nov 21 17:23:17 crc kubenswrapper[4967]: I1121 17:23:17.461827 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b"} Nov 21 17:23:17 crc kubenswrapper[4967]: I1121 17:23:17.462049 4967 scope.go:117] "RemoveContainer" 
containerID="2f2eb3bd789553f169bc0896a7408574c03a09e64d1320010f023541ba17ad69" Nov 21 17:23:42 crc kubenswrapper[4967]: E1121 17:23:42.544076 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:23:42 crc kubenswrapper[4967]: E1121 17:23:42.620767 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Nov 21 17:23:42 crc kubenswrapper[4967]: E1121 17:23:42.625549 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7lhzb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReferenc
e:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(e74fdffd-f5c7-4be6-8d37-5d9e07704aaa): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 17:23:42 crc kubenswrapper[4967]: E1121 17:23:42.628471 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" podUID="e74fdffd-f5c7-4be6-8d37-5d9e07704aaa" Nov 21 17:23:42 crc kubenswrapper[4967]: I1121 17:23:42.842775 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b" Nov 21 17:23:42 crc kubenswrapper[4967]: E1121 17:23:42.844185 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:23:42 crc kubenswrapper[4967]: E1121 17:23:42.847014 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="e74fdffd-f5c7-4be6-8d37-5d9e07704aaa" Nov 21 17:23:54 crc kubenswrapper[4967]: I1121 17:23:54.537274 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b" Nov 21 17:23:54 crc kubenswrapper[4967]: E1121 17:23:54.538412 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:23:54 crc kubenswrapper[4967]: E1121 17:23:54.582417 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-aodh-api:current-tested" Nov 21 17:23:54 crc kubenswrapper[4967]: E1121 17:23:54.582478 4967 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-aodh-api:current-tested" Nov 21 17:23:54 crc kubenswrapper[4967]: E1121 17:23:54.582608 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:aodh-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-aodh-api:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:AodhPassword,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:AodhPassword,Optional:nil,},},},EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:aodh-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-67bjq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42402,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod aodh-db-sync-qkxlx_openstack(6d018f81-8caf-43f2-8ce6-5a799aacde0d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 17:23:54 crc kubenswrapper[4967]: E1121 17:23:54.583727 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"aodh-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/aodh-db-sync-qkxlx" podUID="6d018f81-8caf-43f2-8ce6-5a799aacde0d" Nov 21 17:23:54 crc kubenswrapper[4967]: E1121 17:23:54.979224 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"aodh-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-aodh-api:current-tested\\\"\"" pod="openstack/aodh-db-sync-qkxlx" podUID="6d018f81-8caf-43f2-8ce6-5a799aacde0d" Nov 21 17:23:55 crc kubenswrapper[4967]: E1121 17:23:55.125183 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Nov 21 17:23:55 crc kubenswrapper[4967]: E1121 17:23:55.125243 4967 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying 
config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Nov 21 17:23:55 crc kubenswrapper[4967]: E1121 17:23:55.125377 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lprn4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-klvmk_openstack(36a77740-439b-489e-ae8b-d4f675be9f14): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 17:23:55 crc kubenswrapper[4967]: E1121 17:23:55.126551 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-klvmk" podUID="36a77740-439b-489e-ae8b-d4f675be9f14" Nov 21 17:23:55 crc kubenswrapper[4967]: E1121 17:23:55.639182 4967 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Nov 21 17:23:55 crc kubenswrapper[4967]: E1121 17:23:55.639777 4967 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Nov 21 17:23:55 crc kubenswrapper[4967]: E1121 17:23:55.639936 4967 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nf4h54dhc6hffh559h666h79h54h58ch54dh559h664h646h5fdh8chb5h694hb6hfh5d6hb8h86h664hc9h7h5bfhc8h74hb6h555h64fh5c7q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mvtpw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(be34fe3c-c5f0-4eaf-a694-02b5b5bf343b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 17:23:55 crc kubenswrapper[4967]: I1121 17:23:55.817244 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 21 17:23:55 crc kubenswrapper[4967]: E1121 17:23:55.987241 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-klvmk" podUID="36a77740-439b-489e-ae8b-d4f675be9f14" Nov 21 17:23:57 crc kubenswrapper[4967]: I1121 17:23:56.999641 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b","Type":"ContainerStarted","Data":"3cd3f8bd7fd914c3392dcb99d6099e906381fbbd14e25824be5ef47249ba2026"} Nov 21 17:23:58 crc kubenswrapper[4967]: I1121 17:23:58.011943 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/tempest-tests-tempest" event={"ID":"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa","Type":"ContainerStarted","Data":"84ec716b35ca4d68dd68f0435cee2e226fa6b64f77d91d801231aaa05f5e7e2c"} Nov 21 17:23:58 crc kubenswrapper[4967]: I1121 17:23:58.013600 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b","Type":"ContainerStarted","Data":"2dfb58ed93a3e0e6a57ab93f0809765230cc7df35ac2c6948fae21d280a58b5d"} Nov 21 17:23:59 crc kubenswrapper[4967]: E1121 17:23:59.406102 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="be34fe3c-c5f0-4eaf-a694-02b5b5bf343b" Nov 21 17:24:00 crc kubenswrapper[4967]: I1121 17:24:00.083674 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b","Type":"ContainerStarted","Data":"3fab9de2cb07c8e1efa75e0c98551213a7a89f1c30645505e0860dc5dd0b3100"} Nov 21 17:24:00 crc kubenswrapper[4967]: I1121 17:24:00.084246 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 17:24:00 crc kubenswrapper[4967]: E1121 17:24:00.095918 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="be34fe3c-c5f0-4eaf-a694-02b5b5bf343b" Nov 21 17:24:00 crc kubenswrapper[4967]: I1121 17:24:00.200514 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=7.269874549 podStartE2EDuration="57.200488134s" podCreationTimestamp="2025-11-21 17:23:03 +0000 UTC" firstStartedPulling="2025-11-21 17:23:05.883815483 +0000 UTC m=+6474.142336491" lastFinishedPulling="2025-11-21 17:23:55.814429068 +0000 UTC m=+6524.072950076" observedRunningTime="2025-11-21 17:23:58.04372831 +0000 UTC m=+6526.302249338" watchObservedRunningTime="2025-11-21 17:24:00.200488134 +0000 UTC m=+6528.459009152" Nov 21 17:24:01 crc kubenswrapper[4967]: E1121 17:24:01.096648 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="be34fe3c-c5f0-4eaf-a694-02b5b5bf343b" Nov 21 17:24:05 crc kubenswrapper[4967]: I1121 17:24:05.536888 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b" Nov 21 17:24:05 crc kubenswrapper[4967]: E1121 17:24:05.539251 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:24:07 crc kubenswrapper[4967]: I1121 17:24:07.195730 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/heat-db-sync-klvmk" event={"ID":"36a77740-439b-489e-ae8b-d4f675be9f14","Type":"ContainerStarted","Data":"fdf839b7cb225643cb400e343664299a4d3e13d27f1a54f375d9e9972cad29be"} Nov 21 17:24:07 crc kubenswrapper[4967]: I1121 17:24:07.219815 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-klvmk" podStartSLOduration=2.087477832 podStartE2EDuration="1m10.21979554s" podCreationTimestamp="2025-11-21 17:22:57 +0000 UTC" firstStartedPulling="2025-11-21 17:22:58.589551211 +0000 UTC m=+6466.848072209" lastFinishedPulling="2025-11-21 17:24:06.721868909 +0000 UTC m=+6534.980389917" observedRunningTime="2025-11-21 17:24:07.211562005 +0000 UTC m=+6535.470083013" watchObservedRunningTime="2025-11-21 17:24:07.21979554 +0000 UTC m=+6535.478316548" Nov 21 17:24:08 crc kubenswrapper[4967]: I1121 17:24:08.908438 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 21 17:24:09 crc kubenswrapper[4967]: I1121 17:24:09.225103 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-qkxlx" event={"ID":"6d018f81-8caf-43f2-8ce6-5a799aacde0d","Type":"ContainerStarted","Data":"cc31cf1fb3a858eeda7bd10eebe0fe4cc860411e5244200af94e3e6116c09bdd"} Nov 21 17:24:09 crc kubenswrapper[4967]: I1121 17:24:09.261920 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-qkxlx" podStartSLOduration=2.4004159449999998 podStartE2EDuration="1m12.261891909s" podCreationTimestamp="2025-11-21 17:22:57 +0000 UTC" firstStartedPulling="2025-11-21 17:22:59.043935805 +0000 UTC m=+6467.302456813" lastFinishedPulling="2025-11-21 17:24:08.905411769 +0000 UTC m=+6537.163932777" observedRunningTime="2025-11-21 17:24:09.244478171 +0000 UTC m=+6537.502999179" watchObservedRunningTime="2025-11-21 17:24:09.261891909 +0000 UTC m=+6537.520412917" Nov 21 17:24:10 crc kubenswrapper[4967]: I1121 17:24:10.241471 4967 generic.go:334] "Generic (PLEG): container finished" podID="36a77740-439b-489e-ae8b-d4f675be9f14" containerID="fdf839b7cb225643cb400e343664299a4d3e13d27f1a54f375d9e9972cad29be" exitCode=0 Nov 21 17:24:10 crc kubenswrapper[4967]: I1121 17:24:10.241536 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-klvmk" event={"ID":"36a77740-439b-489e-ae8b-d4f675be9f14","Type":"ContainerDied","Data":"fdf839b7cb225643cb400e343664299a4d3e13d27f1a54f375d9e9972cad29be"} Nov 21 17:24:12 crc kubenswrapper[4967]: I1121 17:24:12.089476 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-klvmk" Nov 21 17:24:12 crc kubenswrapper[4967]: I1121 17:24:12.143289 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lprn4\" (UniqueName: \"kubernetes.io/projected/36a77740-439b-489e-ae8b-d4f675be9f14-kube-api-access-lprn4\") pod \"36a77740-439b-489e-ae8b-d4f675be9f14\" (UID: \"36a77740-439b-489e-ae8b-d4f675be9f14\") " Nov 21 17:24:12 crc kubenswrapper[4967]: I1121 17:24:12.143376 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36a77740-439b-489e-ae8b-d4f675be9f14-combined-ca-bundle\") pod \"36a77740-439b-489e-ae8b-d4f675be9f14\" (UID: \"36a77740-439b-489e-ae8b-d4f675be9f14\") " Nov 21 17:24:12 crc kubenswrapper[4967]: I1121 17:24:12.143564 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36a77740-439b-489e-ae8b-d4f675be9f14-config-data\") pod \"36a77740-439b-489e-ae8b-d4f675be9f14\" (UID: \"36a77740-439b-489e-ae8b-d4f675be9f14\") " Nov 21 17:24:12 crc kubenswrapper[4967]: I1121 17:24:12.150551 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36a77740-439b-489e-ae8b-d4f675be9f14-kube-api-access-lprn4" (OuterVolumeSpecName: "kube-api-access-lprn4") pod "36a77740-439b-489e-ae8b-d4f675be9f14" (UID: "36a77740-439b-489e-ae8b-d4f675be9f14"). InnerVolumeSpecName "kube-api-access-lprn4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:24:12 crc kubenswrapper[4967]: I1121 17:24:12.181437 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36a77740-439b-489e-ae8b-d4f675be9f14-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "36a77740-439b-489e-ae8b-d4f675be9f14" (UID: "36a77740-439b-489e-ae8b-d4f675be9f14"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 17:24:12 crc kubenswrapper[4967]: I1121 17:24:12.233235 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36a77740-439b-489e-ae8b-d4f675be9f14-config-data" (OuterVolumeSpecName: "config-data") pod "36a77740-439b-489e-ae8b-d4f675be9f14" (UID: "36a77740-439b-489e-ae8b-d4f675be9f14"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 17:24:12 crc kubenswrapper[4967]: I1121 17:24:12.247543 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lprn4\" (UniqueName: \"kubernetes.io/projected/36a77740-439b-489e-ae8b-d4f675be9f14-kube-api-access-lprn4\") on node \"crc\" DevicePath \"\"" Nov 21 17:24:12 crc kubenswrapper[4967]: I1121 17:24:12.247580 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36a77740-439b-489e-ae8b-d4f675be9f14-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 17:24:12 crc kubenswrapper[4967]: I1121 17:24:12.247590 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36a77740-439b-489e-ae8b-d4f675be9f14-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 17:24:12 crc kubenswrapper[4967]: I1121 17:24:12.268879 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-klvmk" event={"ID":"36a77740-439b-489e-ae8b-d4f675be9f14","Type":"ContainerDied","Data":"f40a0dc08b0cf83a754c19973dd5960b802981bcee9d12c5d1c5ac714f36819c"} Nov 21 17:24:12 crc kubenswrapper[4967]: I1121 17:24:12.269127 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f40a0dc08b0cf83a754c19973dd5960b802981bcee9d12c5d1c5ac714f36819c" Nov 21 17:24:12 crc kubenswrapper[4967]: I1121 17:24:12.268919 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-klvmk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.279680 4967 generic.go:334] "Generic (PLEG): container finished" podID="6d018f81-8caf-43f2-8ce6-5a799aacde0d" containerID="cc31cf1fb3a858eeda7bd10eebe0fe4cc860411e5244200af94e3e6116c09bdd" exitCode=0 Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.279793 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-qkxlx" event={"ID":"6d018f81-8caf-43f2-8ce6-5a799aacde0d","Type":"ContainerDied","Data":"cc31cf1fb3a858eeda7bd10eebe0fe4cc860411e5244200af94e3e6116c09bdd"} Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.470996 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-79b74c6887-kxssl"] Nov 21 17:24:13 crc kubenswrapper[4967]: E1121 17:24:13.471794 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36a77740-439b-489e-ae8b-d4f675be9f14" containerName="heat-db-sync" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.471821 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="36a77740-439b-489e-ae8b-d4f675be9f14" containerName="heat-db-sync" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.472152 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="36a77740-439b-489e-ae8b-d4f675be9f14" containerName="heat-db-sync" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.473341 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-79b74c6887-kxssl" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.481225 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-6f79c69644-jcsfk"] Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.483417 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.494640 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-5f5f4d85f8-g4l64"] Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.496292 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.517348 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-79b74c6887-kxssl"] Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.546227 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5f5f4d85f8-g4l64"] Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.559048 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-6f79c69644-jcsfk"] Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.590900 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed27e21a-aab4-4d97-a6de-34158f1e1e03-combined-ca-bundle\") pod \"heat-engine-79b74c6887-kxssl\" (UID: \"ed27e21a-aab4-4d97-a6de-34158f1e1e03\") " pod="openstack/heat-engine-79b74c6887-kxssl" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.591441 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkh4j\" (UniqueName: \"kubernetes.io/projected/b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3-kube-api-access-qkh4j\") pod \"heat-cfnapi-5f5f4d85f8-g4l64\" (UID: \"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3\") " pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.591553 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606-config-data-custom\") pod \"heat-api-6f79c69644-jcsfk\" (UID: \"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606\") " pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.591641 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3-config-data\") pod \"heat-cfnapi-5f5f4d85f8-g4l64\" (UID: \"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3\") " pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.591762 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ed27e21a-aab4-4d97-a6de-34158f1e1e03-config-data-custom\") pod \"heat-engine-79b74c6887-kxssl\" (UID: \"ed27e21a-aab4-4d97-a6de-34158f1e1e03\") " pod="openstack/heat-engine-79b74c6887-kxssl" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.591899 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606-config-data\") pod \"heat-api-6f79c69644-jcsfk\" (UID: \"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606\") " pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.591969 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3-public-tls-certs\") pod \"heat-cfnapi-5f5f4d85f8-g4l64\" (UID: \"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3\") " pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.592086 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606-public-tls-certs\") pod \"heat-api-6f79c69644-jcsfk\" (UID: \"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606\") " pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.590939 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.593502 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmfjz\" (UniqueName: \"kubernetes.io/projected/ed27e21a-aab4-4d97-a6de-34158f1e1e03-kube-api-access-dmfjz\") pod \"heat-engine-79b74c6887-kxssl\" (UID: \"ed27e21a-aab4-4d97-a6de-34158f1e1e03\") " pod="openstack/heat-engine-79b74c6887-kxssl" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.593851 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606-combined-ca-bundle\") pod \"heat-api-6f79c69644-jcsfk\" (UID: \"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606\") " pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.594203 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606-internal-tls-certs\") pod \"heat-api-6f79c69644-jcsfk\" (UID: \"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606\") " pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.594371 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed27e21a-aab4-4d97-a6de-34158f1e1e03-config-data\") pod \"heat-engine-79b74c6887-kxssl\" (UID: \"ed27e21a-aab4-4d97-a6de-34158f1e1e03\") " pod="openstack/heat-engine-79b74c6887-kxssl" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.594564 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3-combined-ca-bundle\") pod \"heat-cfnapi-5f5f4d85f8-g4l64\" (UID: \"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3\") " pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.594698 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3-config-data-custom\") pod \"heat-cfnapi-5f5f4d85f8-g4l64\" (UID: \"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3\") " pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.594883 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plhjw\" (UniqueName: \"kubernetes.io/projected/c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606-kube-api-access-plhjw\") pod 
\"heat-api-6f79c69644-jcsfk\" (UID: \"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606\") " pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.595156 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3-internal-tls-certs\") pod \"heat-cfnapi-5f5f4d85f8-g4l64\" (UID: \"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3\") " pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.698213 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plhjw\" (UniqueName: \"kubernetes.io/projected/c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606-kube-api-access-plhjw\") pod \"heat-api-6f79c69644-jcsfk\" (UID: \"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606\") " pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.698283 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3-internal-tls-certs\") pod \"heat-cfnapi-5f5f4d85f8-g4l64\" (UID: \"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3\") " pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.698367 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed27e21a-aab4-4d97-a6de-34158f1e1e03-combined-ca-bundle\") pod \"heat-engine-79b74c6887-kxssl\" (UID: \"ed27e21a-aab4-4d97-a6de-34158f1e1e03\") " pod="openstack/heat-engine-79b74c6887-kxssl" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.698396 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkh4j\" (UniqueName: \"kubernetes.io/projected/b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3-kube-api-access-qkh4j\") pod \"heat-cfnapi-5f5f4d85f8-g4l64\" (UID: \"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3\") " pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.698453 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606-config-data-custom\") pod \"heat-api-6f79c69644-jcsfk\" (UID: \"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606\") " pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.698481 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3-config-data\") pod \"heat-cfnapi-5f5f4d85f8-g4l64\" (UID: \"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3\") " pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.698730 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ed27e21a-aab4-4d97-a6de-34158f1e1e03-config-data-custom\") pod \"heat-engine-79b74c6887-kxssl\" (UID: \"ed27e21a-aab4-4d97-a6de-34158f1e1e03\") " pod="openstack/heat-engine-79b74c6887-kxssl" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.700041 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606-config-data\") pod 
\"heat-api-6f79c69644-jcsfk\" (UID: \"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606\") " pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.700110 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3-public-tls-certs\") pod \"heat-cfnapi-5f5f4d85f8-g4l64\" (UID: \"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3\") " pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.700577 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606-public-tls-certs\") pod \"heat-api-6f79c69644-jcsfk\" (UID: \"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606\") " pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.700658 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmfjz\" (UniqueName: \"kubernetes.io/projected/ed27e21a-aab4-4d97-a6de-34158f1e1e03-kube-api-access-dmfjz\") pod \"heat-engine-79b74c6887-kxssl\" (UID: \"ed27e21a-aab4-4d97-a6de-34158f1e1e03\") " pod="openstack/heat-engine-79b74c6887-kxssl" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.700737 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606-combined-ca-bundle\") pod \"heat-api-6f79c69644-jcsfk\" (UID: \"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606\") " pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.700802 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606-internal-tls-certs\") pod \"heat-api-6f79c69644-jcsfk\" (UID: \"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606\") " pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.700888 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed27e21a-aab4-4d97-a6de-34158f1e1e03-config-data\") pod \"heat-engine-79b74c6887-kxssl\" (UID: \"ed27e21a-aab4-4d97-a6de-34158f1e1e03\") " pod="openstack/heat-engine-79b74c6887-kxssl" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.700933 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3-combined-ca-bundle\") pod \"heat-cfnapi-5f5f4d85f8-g4l64\" (UID: \"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3\") " pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.701016 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3-config-data-custom\") pod \"heat-cfnapi-5f5f4d85f8-g4l64\" (UID: \"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3\") " pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.705721 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606-internal-tls-certs\") pod \"heat-api-6f79c69644-jcsfk\" (UID: 
\"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606\") " pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.706434 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606-public-tls-certs\") pod \"heat-api-6f79c69644-jcsfk\" (UID: \"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606\") " pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.714599 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606-combined-ca-bundle\") pod \"heat-api-6f79c69644-jcsfk\" (UID: \"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606\") " pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.716426 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-plhjw\" (UniqueName: \"kubernetes.io/projected/c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606-kube-api-access-plhjw\") pod \"heat-api-6f79c69644-jcsfk\" (UID: \"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606\") " pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.720746 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dmfjz\" (UniqueName: \"kubernetes.io/projected/ed27e21a-aab4-4d97-a6de-34158f1e1e03-kube-api-access-dmfjz\") pod \"heat-engine-79b74c6887-kxssl\" (UID: \"ed27e21a-aab4-4d97-a6de-34158f1e1e03\") " pod="openstack/heat-engine-79b74c6887-kxssl" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.720940 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkh4j\" (UniqueName: \"kubernetes.io/projected/b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3-kube-api-access-qkh4j\") pod \"heat-cfnapi-5f5f4d85f8-g4l64\" (UID: \"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3\") " pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.721609 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606-config-data-custom\") pod \"heat-api-6f79c69644-jcsfk\" (UID: \"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606\") " pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.721678 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3-internal-tls-certs\") pod \"heat-cfnapi-5f5f4d85f8-g4l64\" (UID: \"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3\") " pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.722178 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed27e21a-aab4-4d97-a6de-34158f1e1e03-config-data\") pod \"heat-engine-79b74c6887-kxssl\" (UID: \"ed27e21a-aab4-4d97-a6de-34158f1e1e03\") " pod="openstack/heat-engine-79b74c6887-kxssl" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.723529 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3-combined-ca-bundle\") pod \"heat-cfnapi-5f5f4d85f8-g4l64\" (UID: \"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3\") " pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 
17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.725066 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ed27e21a-aab4-4d97-a6de-34158f1e1e03-config-data-custom\") pod \"heat-engine-79b74c6887-kxssl\" (UID: \"ed27e21a-aab4-4d97-a6de-34158f1e1e03\") " pod="openstack/heat-engine-79b74c6887-kxssl" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.726827 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3-public-tls-certs\") pod \"heat-cfnapi-5f5f4d85f8-g4l64\" (UID: \"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3\") " pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.727014 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3-config-data-custom\") pod \"heat-cfnapi-5f5f4d85f8-g4l64\" (UID: \"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3\") " pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.727637 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3-config-data\") pod \"heat-cfnapi-5f5f4d85f8-g4l64\" (UID: \"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3\") " pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.731762 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed27e21a-aab4-4d97-a6de-34158f1e1e03-combined-ca-bundle\") pod \"heat-engine-79b74c6887-kxssl\" (UID: \"ed27e21a-aab4-4d97-a6de-34158f1e1e03\") " pod="openstack/heat-engine-79b74c6887-kxssl" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.731911 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606-config-data\") pod \"heat-api-6f79c69644-jcsfk\" (UID: \"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606\") " pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.794744 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-79b74c6887-kxssl" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.809980 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:13 crc kubenswrapper[4967]: I1121 17:24:13.824808 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:14 crc kubenswrapper[4967]: I1121 17:24:14.311158 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"be34fe3c-c5f0-4eaf-a694-02b5b5bf343b","Type":"ContainerStarted","Data":"c8c14810d8fbe31a6642a8f1af4688bd6b57bf62db68ed5145c1738c3f66d7ff"} Nov 21 17:24:14 crc kubenswrapper[4967]: I1121 17:24:14.425690 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.115709193 podStartE2EDuration="1m11.425672871s" podCreationTimestamp="2025-11-21 17:23:03 +0000 UTC" firstStartedPulling="2025-11-21 17:23:04.455965036 +0000 UTC m=+6472.714486044" lastFinishedPulling="2025-11-21 17:24:13.765928704 +0000 UTC m=+6542.024449722" observedRunningTime="2025-11-21 17:24:14.341644364 +0000 UTC m=+6542.600165392" watchObservedRunningTime="2025-11-21 17:24:14.425672871 +0000 UTC m=+6542.684193879" Nov 21 17:24:14 crc kubenswrapper[4967]: I1121 17:24:14.439169 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-79b74c6887-kxssl"] Nov 21 17:24:14 crc kubenswrapper[4967]: I1121 17:24:14.517488 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-6f79c69644-jcsfk"] Nov 21 17:24:14 crc kubenswrapper[4967]: I1121 17:24:14.650014 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-5f5f4d85f8-g4l64"] Nov 21 17:24:14 crc kubenswrapper[4967]: I1121 17:24:14.857800 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-qkxlx" Nov 21 17:24:14 crc kubenswrapper[4967]: I1121 17:24:14.938808 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d018f81-8caf-43f2-8ce6-5a799aacde0d-config-data\") pod \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\" (UID: \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\") " Nov 21 17:24:14 crc kubenswrapper[4967]: I1121 17:24:14.938986 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d018f81-8caf-43f2-8ce6-5a799aacde0d-combined-ca-bundle\") pod \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\" (UID: \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\") " Nov 21 17:24:14 crc kubenswrapper[4967]: I1121 17:24:14.939013 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-67bjq\" (UniqueName: \"kubernetes.io/projected/6d018f81-8caf-43f2-8ce6-5a799aacde0d-kube-api-access-67bjq\") pod \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\" (UID: \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\") " Nov 21 17:24:14 crc kubenswrapper[4967]: I1121 17:24:14.939167 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d018f81-8caf-43f2-8ce6-5a799aacde0d-scripts\") pod \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\" (UID: \"6d018f81-8caf-43f2-8ce6-5a799aacde0d\") " Nov 21 17:24:14 crc kubenswrapper[4967]: I1121 17:24:14.954577 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d018f81-8caf-43f2-8ce6-5a799aacde0d-kube-api-access-67bjq" (OuterVolumeSpecName: "kube-api-access-67bjq") pod "6d018f81-8caf-43f2-8ce6-5a799aacde0d" (UID: "6d018f81-8caf-43f2-8ce6-5a799aacde0d"). InnerVolumeSpecName "kube-api-access-67bjq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:24:14 crc kubenswrapper[4967]: I1121 17:24:14.962423 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d018f81-8caf-43f2-8ce6-5a799aacde0d-scripts" (OuterVolumeSpecName: "scripts") pod "6d018f81-8caf-43f2-8ce6-5a799aacde0d" (UID: "6d018f81-8caf-43f2-8ce6-5a799aacde0d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 17:24:14 crc kubenswrapper[4967]: I1121 17:24:14.985059 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d018f81-8caf-43f2-8ce6-5a799aacde0d-config-data" (OuterVolumeSpecName: "config-data") pod "6d018f81-8caf-43f2-8ce6-5a799aacde0d" (UID: "6d018f81-8caf-43f2-8ce6-5a799aacde0d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 17:24:15 crc kubenswrapper[4967]: I1121 17:24:15.001430 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d018f81-8caf-43f2-8ce6-5a799aacde0d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6d018f81-8caf-43f2-8ce6-5a799aacde0d" (UID: "6d018f81-8caf-43f2-8ce6-5a799aacde0d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 17:24:15 crc kubenswrapper[4967]: I1121 17:24:15.042393 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d018f81-8caf-43f2-8ce6-5a799aacde0d-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 17:24:15 crc kubenswrapper[4967]: I1121 17:24:15.042671 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d018f81-8caf-43f2-8ce6-5a799aacde0d-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 17:24:15 crc kubenswrapper[4967]: I1121 17:24:15.042758 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d018f81-8caf-43f2-8ce6-5a799aacde0d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 17:24:15 crc kubenswrapper[4967]: I1121 17:24:15.042891 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-67bjq\" (UniqueName: \"kubernetes.io/projected/6d018f81-8caf-43f2-8ce6-5a799aacde0d-kube-api-access-67bjq\") on node \"crc\" DevicePath \"\"" Nov 21 17:24:15 crc kubenswrapper[4967]: I1121 17:24:15.334908 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" event={"ID":"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3","Type":"ContainerStarted","Data":"538b12dd7bd4ee4d72187f05e20a4ba888c5cd93ef9028c87a3eeb4a9da099dd"} Nov 21 17:24:15 crc kubenswrapper[4967]: I1121 17:24:15.338407 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-79b74c6887-kxssl" event={"ID":"ed27e21a-aab4-4d97-a6de-34158f1e1e03","Type":"ContainerStarted","Data":"5c072e4925b1d411bfa41bb75fd6c81f56324cea175f00e78ed46fdf898d9ff2"} Nov 21 17:24:15 crc kubenswrapper[4967]: I1121 17:24:15.338461 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-79b74c6887-kxssl" event={"ID":"ed27e21a-aab4-4d97-a6de-34158f1e1e03","Type":"ContainerStarted","Data":"d1cfb336696a8bb1a97fd6258061004a2593d9474fcd5d1ca03d54a9c4eff402"} Nov 21 17:24:15 crc kubenswrapper[4967]: I1121 17:24:15.339411 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-79b74c6887-kxssl" Nov 21 17:24:15 crc 
kubenswrapper[4967]: I1121 17:24:15.344197 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-qkxlx" event={"ID":"6d018f81-8caf-43f2-8ce6-5a799aacde0d","Type":"ContainerDied","Data":"14e0cbd3e532b1770cfce700a9ab4642fcd0ff8f616d57232171d99bbd131d6b"} Nov 21 17:24:15 crc kubenswrapper[4967]: I1121 17:24:15.344237 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="14e0cbd3e532b1770cfce700a9ab4642fcd0ff8f616d57232171d99bbd131d6b" Nov 21 17:24:15 crc kubenswrapper[4967]: I1121 17:24:15.344300 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-qkxlx" Nov 21 17:24:15 crc kubenswrapper[4967]: I1121 17:24:15.350002 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-6f79c69644-jcsfk" event={"ID":"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606","Type":"ContainerStarted","Data":"e20f8fbdbf5f5db3c132e8468413bb0139b74e28146425bf9a5f027db4d1817a"} Nov 21 17:24:15 crc kubenswrapper[4967]: I1121 17:24:15.367097 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-79b74c6887-kxssl" podStartSLOduration=2.367077085 podStartE2EDuration="2.367077085s" podCreationTimestamp="2025-11-21 17:24:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 17:24:15.357720737 +0000 UTC m=+6543.616241745" watchObservedRunningTime="2025-11-21 17:24:15.367077085 +0000 UTC m=+6543.625598093" Nov 21 17:24:16 crc kubenswrapper[4967]: E1121 17:24:16.036805 4967 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d018f81_8caf_43f2_8ce6_5a799aacde0d.slice/crio-cc31cf1fb3a858eeda7bd10eebe0fe4cc860411e5244200af94e3e6116c09bdd.scope\": RecentStats: unable to find data in memory cache]" Nov 21 17:24:17 crc kubenswrapper[4967]: I1121 17:24:17.381788 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-6f79c69644-jcsfk" event={"ID":"c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606","Type":"ContainerStarted","Data":"56cd1b3791f0a431c486bfe43ed6f74d00f249ad89c399ae40ff0ef5af61f0db"} Nov 21 17:24:17 crc kubenswrapper[4967]: I1121 17:24:17.382427 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:17 crc kubenswrapper[4967]: I1121 17:24:17.385905 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" event={"ID":"b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3","Type":"ContainerStarted","Data":"46dd9a3aa46d65356908cdde68e6888b1a9c9d32d2a7742af9e62db732c47ccb"} Nov 21 17:24:17 crc kubenswrapper[4967]: I1121 17:24:17.386109 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:17 crc kubenswrapper[4967]: I1121 17:24:17.403464 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-6f79c69644-jcsfk" podStartSLOduration=2.503861193 podStartE2EDuration="4.40344166s" podCreationTimestamp="2025-11-21 17:24:13 +0000 UTC" firstStartedPulling="2025-11-21 17:24:14.524224214 +0000 UTC m=+6542.782745222" lastFinishedPulling="2025-11-21 17:24:16.423804661 +0000 UTC m=+6544.682325689" observedRunningTime="2025-11-21 17:24:17.398850118 +0000 UTC m=+6545.657371136" watchObservedRunningTime="2025-11-21 17:24:17.40344166 +0000 
UTC m=+6545.661962678" Nov 21 17:24:17 crc kubenswrapper[4967]: I1121 17:24:17.431732 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" podStartSLOduration=2.685167846 podStartE2EDuration="4.431706369s" podCreationTimestamp="2025-11-21 17:24:13 +0000 UTC" firstStartedPulling="2025-11-21 17:24:14.684759612 +0000 UTC m=+6542.943280620" lastFinishedPulling="2025-11-21 17:24:16.431298115 +0000 UTC m=+6544.689819143" observedRunningTime="2025-11-21 17:24:17.422078274 +0000 UTC m=+6545.680599302" watchObservedRunningTime="2025-11-21 17:24:17.431706369 +0000 UTC m=+6545.690227377" Nov 21 17:24:17 crc kubenswrapper[4967]: I1121 17:24:17.537693 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b" Nov 21 17:24:17 crc kubenswrapper[4967]: E1121 17:24:17.538059 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:24:24 crc kubenswrapper[4967]: E1121 17:24:24.547474 4967 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d018f81_8caf_43f2_8ce6_5a799aacde0d.slice/crio-cc31cf1fb3a858eeda7bd10eebe0fe4cc860411e5244200af94e3e6116c09bdd.scope\": RecentStats: unable to find data in memory cache]" Nov 21 17:24:25 crc kubenswrapper[4967]: I1121 17:24:25.553529 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-5f5f4d85f8-g4l64" Nov 21 17:24:25 crc kubenswrapper[4967]: I1121 17:24:25.555428 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-6f79c69644-jcsfk" Nov 21 17:24:25 crc kubenswrapper[4967]: I1121 17:24:25.666401 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7d484d94c7-dntt2"] Nov 21 17:24:25 crc kubenswrapper[4967]: I1121 17:24:25.666734 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-7d484d94c7-dntt2" podUID="c53783e2-ed84-49c6-b688-9c6603a6c3b1" containerName="heat-cfnapi" containerID="cri-o://38a92be94a6966320e3547c6cbaffa992d053ccbe6f193300d9dceb07407aad2" gracePeriod=60 Nov 21 17:24:25 crc kubenswrapper[4967]: I1121 17:24:25.685991 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-f74c7fcfc-6jdr9"] Nov 21 17:24:25 crc kubenswrapper[4967]: I1121 17:24:25.686259 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-f74c7fcfc-6jdr9" podUID="fba7e61c-eb55-4772-9904-7e6ae77ec941" containerName="heat-api" containerID="cri-o://e7a50f5b93a733956a7277975bb528d0b22dd89e5d74ffafc51a311fbe5d541a" gracePeriod=60 Nov 21 17:24:26 crc kubenswrapper[4967]: E1121 17:24:26.100772 4967 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d018f81_8caf_43f2_8ce6_5a799aacde0d.slice/crio-cc31cf1fb3a858eeda7bd10eebe0fe4cc860411e5244200af94e3e6116c09bdd.scope\": RecentStats: unable to find data in memory cache]" Nov 21 
17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.593901 4967 generic.go:334] "Generic (PLEG): container finished" podID="c53783e2-ed84-49c6-b688-9c6603a6c3b1" containerID="38a92be94a6966320e3547c6cbaffa992d053ccbe6f193300d9dceb07407aad2" exitCode=0 Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.594535 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7d484d94c7-dntt2" event={"ID":"c53783e2-ed84-49c6-b688-9c6603a6c3b1","Type":"ContainerDied","Data":"38a92be94a6966320e3547c6cbaffa992d053ccbe6f193300d9dceb07407aad2"} Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.613041 4967 generic.go:334] "Generic (PLEG): container finished" podID="fba7e61c-eb55-4772-9904-7e6ae77ec941" containerID="e7a50f5b93a733956a7277975bb528d0b22dd89e5d74ffafc51a311fbe5d541a" exitCode=0 Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.613133 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-f74c7fcfc-6jdr9" event={"ID":"fba7e61c-eb55-4772-9904-7e6ae77ec941","Type":"ContainerDied","Data":"e7a50f5b93a733956a7277975bb528d0b22dd89e5d74ffafc51a311fbe5d541a"} Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.613219 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-f74c7fcfc-6jdr9" event={"ID":"fba7e61c-eb55-4772-9904-7e6ae77ec941","Type":"ContainerDied","Data":"ed942c1a50b45424d039da2cfd87491ab74a847ae9e9ba6e30c19b7702c07651"} Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.613230 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ed942c1a50b45424d039da2cfd87491ab74a847ae9e9ba6e30c19b7702c07651" Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.718963 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-f74c7fcfc-6jdr9" Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.823979 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-7d484d94c7-dntt2" Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.855816 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-combined-ca-bundle\") pod \"fba7e61c-eb55-4772-9904-7e6ae77ec941\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.855881 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-internal-tls-certs\") pod \"fba7e61c-eb55-4772-9904-7e6ae77ec941\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.855939 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-public-tls-certs\") pod \"fba7e61c-eb55-4772-9904-7e6ae77ec941\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.855991 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnz8q\" (UniqueName: \"kubernetes.io/projected/fba7e61c-eb55-4772-9904-7e6ae77ec941-kube-api-access-mnz8q\") pod \"fba7e61c-eb55-4772-9904-7e6ae77ec941\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.856117 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-config-data\") pod \"fba7e61c-eb55-4772-9904-7e6ae77ec941\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.856193 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-config-data-custom\") pod \"fba7e61c-eb55-4772-9904-7e6ae77ec941\" (UID: \"fba7e61c-eb55-4772-9904-7e6ae77ec941\") " Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.870740 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "fba7e61c-eb55-4772-9904-7e6ae77ec941" (UID: "fba7e61c-eb55-4772-9904-7e6ae77ec941"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.872947 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fba7e61c-eb55-4772-9904-7e6ae77ec941-kube-api-access-mnz8q" (OuterVolumeSpecName: "kube-api-access-mnz8q") pod "fba7e61c-eb55-4772-9904-7e6ae77ec941" (UID: "fba7e61c-eb55-4772-9904-7e6ae77ec941"). InnerVolumeSpecName "kube-api-access-mnz8q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.940750 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "fba7e61c-eb55-4772-9904-7e6ae77ec941" (UID: "fba7e61c-eb55-4772-9904-7e6ae77ec941"). InnerVolumeSpecName "public-tls-certs". 
Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.944981 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fba7e61c-eb55-4772-9904-7e6ae77ec941" (UID: "fba7e61c-eb55-4772-9904-7e6ae77ec941"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.957991 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-combined-ca-bundle\") pod \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") "
Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.958055 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-public-tls-certs\") pod \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") "
Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.958082 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-config-data\") pod \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") "
Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.958204 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-internal-tls-certs\") pod \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") "
Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.958233 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kg8cd\" (UniqueName: \"kubernetes.io/projected/c53783e2-ed84-49c6-b688-9c6603a6c3b1-kube-api-access-kg8cd\") pod \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") "
Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.958300 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-config-data-custom\") pod \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\" (UID: \"c53783e2-ed84-49c6-b688-9c6603a6c3b1\") "
Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.959604 4967 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.959626 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.959635 4967 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-public-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.959645 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnz8q\" (UniqueName: \"kubernetes.io/projected/fba7e61c-eb55-4772-9904-7e6ae77ec941-kube-api-access-mnz8q\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.974678 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c53783e2-ed84-49c6-b688-9c6603a6c3b1" (UID: "c53783e2-ed84-49c6-b688-9c6603a6c3b1"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:24:29 crc kubenswrapper[4967]: I1121 17:24:29.978932 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c53783e2-ed84-49c6-b688-9c6603a6c3b1-kube-api-access-kg8cd" (OuterVolumeSpecName: "kube-api-access-kg8cd") pod "c53783e2-ed84-49c6-b688-9c6603a6c3b1" (UID: "c53783e2-ed84-49c6-b688-9c6603a6c3b1"). InnerVolumeSpecName "kube-api-access-kg8cd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.041129 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-config-data" (OuterVolumeSpecName: "config-data") pod "fba7e61c-eb55-4772-9904-7e6ae77ec941" (UID: "fba7e61c-eb55-4772-9904-7e6ae77ec941"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.067084 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kg8cd\" (UniqueName: \"kubernetes.io/projected/c53783e2-ed84-49c6-b688-9c6603a6c3b1-kube-api-access-kg8cd\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.067359 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-config-data\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.067425 4967 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.070593 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c53783e2-ed84-49c6-b688-9c6603a6c3b1" (UID: "c53783e2-ed84-49c6-b688-9c6603a6c3b1"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.084941 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c53783e2-ed84-49c6-b688-9c6603a6c3b1" (UID: "c53783e2-ed84-49c6-b688-9c6603a6c3b1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.087767 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "fba7e61c-eb55-4772-9904-7e6ae77ec941" (UID: "fba7e61c-eb55-4772-9904-7e6ae77ec941"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.115897 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-config-data" (OuterVolumeSpecName: "config-data") pod "c53783e2-ed84-49c6-b688-9c6603a6c3b1" (UID: "c53783e2-ed84-49c6-b688-9c6603a6c3b1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.135146 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c53783e2-ed84-49c6-b688-9c6603a6c3b1" (UID: "c53783e2-ed84-49c6-b688-9c6603a6c3b1"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.170208 4967 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.170244 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.170256 4967 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fba7e61c-eb55-4772-9904-7e6ae77ec941-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.170265 4967 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-public-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.170278 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c53783e2-ed84-49c6-b688-9c6603a6c3b1-config-data\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.536550 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b"
Nov 21 17:24:30 crc kubenswrapper[4967]: E1121 17:24:30.536849 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.631517 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-f74c7fcfc-6jdr9"
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.633000 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7d484d94c7-dntt2" event={"ID":"c53783e2-ed84-49c6-b688-9c6603a6c3b1","Type":"ContainerDied","Data":"68fe86b4a739481af4db71d759271c3ac7d8209215d43bb5163bbc14df259bc2"}
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.633071 4967 scope.go:117] "RemoveContainer" containerID="38a92be94a6966320e3547c6cbaffa992d053ccbe6f193300d9dceb07407aad2"
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.633210 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7d484d94c7-dntt2"
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.709651 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-f74c7fcfc-6jdr9"]
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.734913 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-f74c7fcfc-6jdr9"]
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.744715 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7d484d94c7-dntt2"]
Nov 21 17:24:30 crc kubenswrapper[4967]: I1121 17:24:30.756570 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-7d484d94c7-dntt2"]
Nov 21 17:24:32 crc kubenswrapper[4967]: I1121 17:24:32.553841 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c53783e2-ed84-49c6-b688-9c6603a6c3b1" path="/var/lib/kubelet/pods/c53783e2-ed84-49c6-b688-9c6603a6c3b1/volumes"
Nov 21 17:24:32 crc kubenswrapper[4967]: I1121 17:24:32.556600 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fba7e61c-eb55-4772-9904-7e6ae77ec941" path="/var/lib/kubelet/pods/fba7e61c-eb55-4772-9904-7e6ae77ec941/volumes"
Nov 21 17:24:33 crc kubenswrapper[4967]: I1121 17:24:33.888165 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-79b74c6887-kxssl"
Nov 21 17:24:33 crc kubenswrapper[4967]: I1121 17:24:33.945397 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-cf9748ff4-bql4m"]
Nov 21 17:24:33 crc kubenswrapper[4967]: I1121 17:24:33.945840 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-cf9748ff4-bql4m" podUID="93184620-a042-499a-bb5b-3d8719a73436" containerName="heat-engine" containerID="cri-o://550914ea0a9dc6c98be7d2f2ca1fe49f9deae5a24055e2d281f34756d6c47a2b" gracePeriod=60
Nov 21 17:24:36 crc kubenswrapper[4967]: E1121 17:24:36.414874 4967 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d018f81_8caf_43f2_8ce6_5a799aacde0d.slice/crio-cc31cf1fb3a858eeda7bd10eebe0fe4cc860411e5244200af94e3e6116c09bdd.scope\": RecentStats: unable to find data in memory cache]"
Nov 21 17:24:39 crc kubenswrapper[4967]: E1121 17:24:39.244510 4967 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d018f81_8caf_43f2_8ce6_5a799aacde0d.slice/crio-cc31cf1fb3a858eeda7bd10eebe0fe4cc860411e5244200af94e3e6116c09bdd.scope\": RecentStats: unable to find data in memory cache]"
Nov 21 17:24:41 crc kubenswrapper[4967]: I1121 17:24:41.537352 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b"
Nov 21 17:24:41 crc kubenswrapper[4967]: E1121 17:24:41.538216 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:24:43 crc kubenswrapper[4967]: I1121 17:24:43.014921 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"]
Nov 21 17:24:43 crc kubenswrapper[4967]: I1121 17:24:43.015817 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerName="aodh-api" containerID="cri-o://e9672fac08d1234a417c9e8ea5490050c3a0456fa4427eabb7ad5e53de328380" gracePeriod=30
Nov 21 17:24:43 crc kubenswrapper[4967]: I1121 17:24:43.015890 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerName="aodh-listener" containerID="cri-o://4aef45e6425d707959a01717a237cf6880a470be4b876915a9fb2e2b178ecb31" gracePeriod=30
Nov 21 17:24:43 crc kubenswrapper[4967]: I1121 17:24:43.015906 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerName="aodh-notifier" containerID="cri-o://da2b664446ba266ce5c348a07faad926f0043b1e13a753c3231fd9817093357a" gracePeriod=30
Nov 21 17:24:43 crc kubenswrapper[4967]: I1121 17:24:43.015925 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerName="aodh-evaluator" containerID="cri-o://8fd455a14d307aaf1106040fbc7532d3bb3c2da66620b86b63286f6c678d6184" gracePeriod=30
Nov 21 17:24:43 crc kubenswrapper[4967]: E1121 17:24:43.657417 4967 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="550914ea0a9dc6c98be7d2f2ca1fe49f9deae5a24055e2d281f34756d6c47a2b" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Nov 21 17:24:43 crc kubenswrapper[4967]: E1121 17:24:43.659145 4967 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="550914ea0a9dc6c98be7d2f2ca1fe49f9deae5a24055e2d281f34756d6c47a2b" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Nov 21 17:24:43 crc kubenswrapper[4967]: E1121 17:24:43.660451 4967 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="550914ea0a9dc6c98be7d2f2ca1fe49f9deae5a24055e2d281f34756d6c47a2b" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Nov 21 17:24:43 crc kubenswrapper[4967]: E1121 17:24:43.660502 4967 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-cf9748ff4-bql4m" podUID="93184620-a042-499a-bb5b-3d8719a73436" containerName="heat-engine"
Nov 21 17:24:43 crc kubenswrapper[4967]: I1121 17:24:43.810930 4967 generic.go:334] "Generic (PLEG): container finished" podID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerID="8fd455a14d307aaf1106040fbc7532d3bb3c2da66620b86b63286f6c678d6184" exitCode=0
Nov 21 17:24:43 crc kubenswrapper[4967]: I1121 17:24:43.811017 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"52ed0309-117b-41b6-bbe8-dd345f306d79","Type":"ContainerDied","Data":"8fd455a14d307aaf1106040fbc7532d3bb3c2da66620b86b63286f6c678d6184"}
Nov 21 17:24:45 crc kubenswrapper[4967]: I1121 17:24:45.022814 4967 scope.go:117] "RemoveContainer" containerID="e7a50f5b93a733956a7277975bb528d0b22dd89e5d74ffafc51a311fbe5d541a"
Nov 21 17:24:46 crc kubenswrapper[4967]: E1121 17:24:46.815965 4967 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d018f81_8caf_43f2_8ce6_5a799aacde0d.slice/crio-cc31cf1fb3a858eeda7bd10eebe0fe4cc860411e5244200af94e3e6116c09bdd.scope\": RecentStats: unable to find data in memory cache]"
Nov 21 17:24:46 crc kubenswrapper[4967]: I1121 17:24:46.849573 4967 generic.go:334] "Generic (PLEG): container finished" podID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerID="4aef45e6425d707959a01717a237cf6880a470be4b876915a9fb2e2b178ecb31" exitCode=0
Nov 21 17:24:46 crc kubenswrapper[4967]: I1121 17:24:46.849604 4967 generic.go:334] "Generic (PLEG): container finished" podID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerID="da2b664446ba266ce5c348a07faad926f0043b1e13a753c3231fd9817093357a" exitCode=0
Nov 21 17:24:46 crc kubenswrapper[4967]: I1121 17:24:46.849613 4967 generic.go:334] "Generic (PLEG): container finished" podID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerID="e9672fac08d1234a417c9e8ea5490050c3a0456fa4427eabb7ad5e53de328380" exitCode=0
Nov 21 17:24:46 crc kubenswrapper[4967]: I1121 17:24:46.849640 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"52ed0309-117b-41b6-bbe8-dd345f306d79","Type":"ContainerDied","Data":"4aef45e6425d707959a01717a237cf6880a470be4b876915a9fb2e2b178ecb31"}
Nov 21 17:24:46 crc kubenswrapper[4967]: I1121 17:24:46.849685 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"52ed0309-117b-41b6-bbe8-dd345f306d79","Type":"ContainerDied","Data":"da2b664446ba266ce5c348a07faad926f0043b1e13a753c3231fd9817093357a"}
Nov 21 17:24:46 crc kubenswrapper[4967]: I1121 17:24:46.849694 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"52ed0309-117b-41b6-bbe8-dd345f306d79","Type":"ContainerDied","Data":"e9672fac08d1234a417c9e8ea5490050c3a0456fa4427eabb7ad5e53de328380"}
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.703224 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.712275 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-cf9748ff4-bql4m"
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.805548 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-scripts\") pod \"52ed0309-117b-41b6-bbe8-dd345f306d79\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") "
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.805836 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-public-tls-certs\") pod \"52ed0309-117b-41b6-bbe8-dd345f306d79\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") "
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.805953 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93184620-a042-499a-bb5b-3d8719a73436-config-data-custom\") pod \"93184620-a042-499a-bb5b-3d8719a73436\" (UID: \"93184620-a042-499a-bb5b-3d8719a73436\") "
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.806012 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93184620-a042-499a-bb5b-3d8719a73436-config-data\") pod \"93184620-a042-499a-bb5b-3d8719a73436\" (UID: \"93184620-a042-499a-bb5b-3d8719a73436\") "
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.806142 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rhwwz\" (UniqueName: \"kubernetes.io/projected/93184620-a042-499a-bb5b-3d8719a73436-kube-api-access-rhwwz\") pod \"93184620-a042-499a-bb5b-3d8719a73436\" (UID: \"93184620-a042-499a-bb5b-3d8719a73436\") "
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.806194 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93184620-a042-499a-bb5b-3d8719a73436-combined-ca-bundle\") pod \"93184620-a042-499a-bb5b-3d8719a73436\" (UID: \"93184620-a042-499a-bb5b-3d8719a73436\") "
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.806218 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ccnfq\" (UniqueName: \"kubernetes.io/projected/52ed0309-117b-41b6-bbe8-dd345f306d79-kube-api-access-ccnfq\") pod \"52ed0309-117b-41b6-bbe8-dd345f306d79\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") "
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.806277 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-internal-tls-certs\") pod \"52ed0309-117b-41b6-bbe8-dd345f306d79\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") "
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.806356 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-combined-ca-bundle\") pod \"52ed0309-117b-41b6-bbe8-dd345f306d79\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") "
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.806416 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-config-data\") pod \"52ed0309-117b-41b6-bbe8-dd345f306d79\" (UID: \"52ed0309-117b-41b6-bbe8-dd345f306d79\") "
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.829788 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-scripts" (OuterVolumeSpecName: "scripts") pod "52ed0309-117b-41b6-bbe8-dd345f306d79" (UID: "52ed0309-117b-41b6-bbe8-dd345f306d79"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.832834 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93184620-a042-499a-bb5b-3d8719a73436-kube-api-access-rhwwz" (OuterVolumeSpecName: "kube-api-access-rhwwz") pod "93184620-a042-499a-bb5b-3d8719a73436" (UID: "93184620-a042-499a-bb5b-3d8719a73436"). InnerVolumeSpecName "kube-api-access-rhwwz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.845215 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52ed0309-117b-41b6-bbe8-dd345f306d79-kube-api-access-ccnfq" (OuterVolumeSpecName: "kube-api-access-ccnfq") pod "52ed0309-117b-41b6-bbe8-dd345f306d79" (UID: "52ed0309-117b-41b6-bbe8-dd345f306d79"). InnerVolumeSpecName "kube-api-access-ccnfq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.856497 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93184620-a042-499a-bb5b-3d8719a73436-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "93184620-a042-499a-bb5b-3d8719a73436" (UID: "93184620-a042-499a-bb5b-3d8719a73436"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.876088 4967 generic.go:334] "Generic (PLEG): container finished" podID="93184620-a042-499a-bb5b-3d8719a73436" containerID="550914ea0a9dc6c98be7d2f2ca1fe49f9deae5a24055e2d281f34756d6c47a2b" exitCode=0
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.876153 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-cf9748ff4-bql4m" event={"ID":"93184620-a042-499a-bb5b-3d8719a73436","Type":"ContainerDied","Data":"550914ea0a9dc6c98be7d2f2ca1fe49f9deae5a24055e2d281f34756d6c47a2b"}
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.876182 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-cf9748ff4-bql4m" event={"ID":"93184620-a042-499a-bb5b-3d8719a73436","Type":"ContainerDied","Data":"6bf6064d3b17657fd30d72a117968f00910f9e96a01e3bd7c188e62f25fc254d"}
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.876198 4967 scope.go:117] "RemoveContainer" containerID="550914ea0a9dc6c98be7d2f2ca1fe49f9deae5a24055e2d281f34756d6c47a2b"
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.876347 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-cf9748ff4-bql4m"
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.896984 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"52ed0309-117b-41b6-bbe8-dd345f306d79","Type":"ContainerDied","Data":"0420700ff345951a107280a216a7f8d07648b93b509819974f7a30249522b709"}
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.897115 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.911848 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rhwwz\" (UniqueName: \"kubernetes.io/projected/93184620-a042-499a-bb5b-3d8719a73436-kube-api-access-rhwwz\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.911894 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ccnfq\" (UniqueName: \"kubernetes.io/projected/52ed0309-117b-41b6-bbe8-dd345f306d79-kube-api-access-ccnfq\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.911907 4967 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-scripts\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.911919 4967 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93184620-a042-499a-bb5b-3d8719a73436-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.933831 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93184620-a042-499a-bb5b-3d8719a73436-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "93184620-a042-499a-bb5b-3d8719a73436" (UID: "93184620-a042-499a-bb5b-3d8719a73436"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.961650 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "52ed0309-117b-41b6-bbe8-dd345f306d79" (UID: "52ed0309-117b-41b6-bbe8-dd345f306d79"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.975805 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "52ed0309-117b-41b6-bbe8-dd345f306d79" (UID: "52ed0309-117b-41b6-bbe8-dd345f306d79"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:24:47 crc kubenswrapper[4967]: I1121 17:24:47.978874 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93184620-a042-499a-bb5b-3d8719a73436-config-data" (OuterVolumeSpecName: "config-data") pod "93184620-a042-499a-bb5b-3d8719a73436" (UID: "93184620-a042-499a-bb5b-3d8719a73436"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.015589 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93184620-a042-499a-bb5b-3d8719a73436-config-data\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.020400 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93184620-a042-499a-bb5b-3d8719a73436-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.020449 4967 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.020466 4967 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-public-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.029723 4967 scope.go:117] "RemoveContainer" containerID="550914ea0a9dc6c98be7d2f2ca1fe49f9deae5a24055e2d281f34756d6c47a2b"
Nov 21 17:24:48 crc kubenswrapper[4967]: E1121 17:24:48.030168 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"550914ea0a9dc6c98be7d2f2ca1fe49f9deae5a24055e2d281f34756d6c47a2b\": container with ID starting with 550914ea0a9dc6c98be7d2f2ca1fe49f9deae5a24055e2d281f34756d6c47a2b not found: ID does not exist" containerID="550914ea0a9dc6c98be7d2f2ca1fe49f9deae5a24055e2d281f34756d6c47a2b"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.030221 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"550914ea0a9dc6c98be7d2f2ca1fe49f9deae5a24055e2d281f34756d6c47a2b"} err="failed to get container status \"550914ea0a9dc6c98be7d2f2ca1fe49f9deae5a24055e2d281f34756d6c47a2b\": rpc error: code = NotFound desc = could not find container \"550914ea0a9dc6c98be7d2f2ca1fe49f9deae5a24055e2d281f34756d6c47a2b\": container with ID starting with 550914ea0a9dc6c98be7d2f2ca1fe49f9deae5a24055e2d281f34756d6c47a2b not found: ID does not exist"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.030253 4967 scope.go:117] "RemoveContainer" containerID="4aef45e6425d707959a01717a237cf6880a470be4b876915a9fb2e2b178ecb31"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.076879 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "52ed0309-117b-41b6-bbe8-dd345f306d79" (UID: "52ed0309-117b-41b6-bbe8-dd345f306d79"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.079057 4967 scope.go:117] "RemoveContainer" containerID="da2b664446ba266ce5c348a07faad926f0043b1e13a753c3231fd9817093357a"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.091283 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-config-data" (OuterVolumeSpecName: "config-data") pod "52ed0309-117b-41b6-bbe8-dd345f306d79" (UID: "52ed0309-117b-41b6-bbe8-dd345f306d79"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.104044 4967 scope.go:117] "RemoveContainer" containerID="8fd455a14d307aaf1106040fbc7532d3bb3c2da66620b86b63286f6c678d6184"
Nov 21 17:24:48 crc kubenswrapper[4967]: E1121 17:24:48.109007 4967 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d018f81_8caf_43f2_8ce6_5a799aacde0d.slice/crio-cc31cf1fb3a858eeda7bd10eebe0fe4cc860411e5244200af94e3e6116c09bdd.scope\": RecentStats: unable to find data in memory cache]"
Nov 21 17:24:48 crc kubenswrapper[4967]: E1121 17:24:48.114290 4967 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d018f81_8caf_43f2_8ce6_5a799aacde0d.slice/crio-cc31cf1fb3a858eeda7bd10eebe0fe4cc860411e5244200af94e3e6116c09bdd.scope\": RecentStats: unable to find data in memory cache]"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.129595 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.129641 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ed0309-117b-41b6-bbe8-dd345f306d79-config-data\") on node \"crc\" DevicePath \"\""
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.147674 4967 scope.go:117] "RemoveContainer" containerID="e9672fac08d1234a417c9e8ea5490050c3a0456fa4427eabb7ad5e53de328380"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.226015 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-cf9748ff4-bql4m"]
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.245154 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-cf9748ff4-bql4m"]
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.264377 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"]
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.276105 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"]
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.291902 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"]
Nov 21 17:24:48 crc kubenswrapper[4967]: E1121 17:24:48.292650 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerName="aodh-api"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.292677 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerName="aodh-api"
Nov 21 17:24:48 crc kubenswrapper[4967]: E1121 17:24:48.292698 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerName="aodh-evaluator"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.292704 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerName="aodh-evaluator"
Nov 21 17:24:48 crc kubenswrapper[4967]: E1121 17:24:48.292710 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93184620-a042-499a-bb5b-3d8719a73436" containerName="heat-engine"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.292717 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="93184620-a042-499a-bb5b-3d8719a73436" containerName="heat-engine"
Nov 21 17:24:48 crc kubenswrapper[4967]: E1121 17:24:48.292740 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerName="aodh-listener"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.292746 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerName="aodh-listener"
Nov 21 17:24:48 crc kubenswrapper[4967]: E1121 17:24:48.292770 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d018f81-8caf-43f2-8ce6-5a799aacde0d" containerName="aodh-db-sync"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.292776 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d018f81-8caf-43f2-8ce6-5a799aacde0d" containerName="aodh-db-sync"
Nov 21 17:24:48 crc kubenswrapper[4967]: E1121 17:24:48.292797 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c53783e2-ed84-49c6-b688-9c6603a6c3b1" containerName="heat-cfnapi"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.292803 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="c53783e2-ed84-49c6-b688-9c6603a6c3b1" containerName="heat-cfnapi"
Nov 21 17:24:48 crc kubenswrapper[4967]: E1121 17:24:48.292822 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fba7e61c-eb55-4772-9904-7e6ae77ec941" containerName="heat-api"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.292828 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="fba7e61c-eb55-4772-9904-7e6ae77ec941" containerName="heat-api"
Nov 21 17:24:48 crc kubenswrapper[4967]: E1121 17:24:48.292850 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerName="aodh-notifier"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.292857 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerName="aodh-notifier"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.293146 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerName="aodh-evaluator"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.293172 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerName="aodh-listener"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.293181 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerName="aodh-api"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.293193 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="c53783e2-ed84-49c6-b688-9c6603a6c3b1" containerName="heat-cfnapi"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.293203 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="93184620-a042-499a-bb5b-3d8719a73436" containerName="heat-engine"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.293219 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="52ed0309-117b-41b6-bbe8-dd345f306d79" containerName="aodh-notifier"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.293233 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="fba7e61c-eb55-4772-9904-7e6ae77ec941" containerName="heat-api"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.293251 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d018f81-8caf-43f2-8ce6-5a799aacde0d" containerName="aodh-db-sync"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.295925 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.297675 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.298245 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-6bsgb"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.298408 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.298438 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.299710 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.306275 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.439111 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/067d67d5-6dd0-43b7-ab83-8765ac6f10ac-public-tls-certs\") pod \"aodh-0\" (UID: \"067d67d5-6dd0-43b7-ab83-8765ac6f10ac\") " pod="openstack/aodh-0"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.439349 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/067d67d5-6dd0-43b7-ab83-8765ac6f10ac-config-data\") pod \"aodh-0\" (UID: \"067d67d5-6dd0-43b7-ab83-8765ac6f10ac\") " pod="openstack/aodh-0"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.439373 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/067d67d5-6dd0-43b7-ab83-8765ac6f10ac-scripts\") pod \"aodh-0\" (UID: \"067d67d5-6dd0-43b7-ab83-8765ac6f10ac\") " pod="openstack/aodh-0"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.439479 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/067d67d5-6dd0-43b7-ab83-8765ac6f10ac-combined-ca-bundle\") pod \"aodh-0\" (UID: \"067d67d5-6dd0-43b7-ab83-8765ac6f10ac\") " pod="openstack/aodh-0"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.439566 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbrdr\" (UniqueName: \"kubernetes.io/projected/067d67d5-6dd0-43b7-ab83-8765ac6f10ac-kube-api-access-kbrdr\") pod \"aodh-0\" (UID: \"067d67d5-6dd0-43b7-ab83-8765ac6f10ac\") " pod="openstack/aodh-0"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.439594 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/067d67d5-6dd0-43b7-ab83-8765ac6f10ac-internal-tls-certs\") pod \"aodh-0\" (UID: \"067d67d5-6dd0-43b7-ab83-8765ac6f10ac\") " pod="openstack/aodh-0"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.542091 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbrdr\" (UniqueName: \"kubernetes.io/projected/067d67d5-6dd0-43b7-ab83-8765ac6f10ac-kube-api-access-kbrdr\") pod \"aodh-0\" (UID: \"067d67d5-6dd0-43b7-ab83-8765ac6f10ac\") " pod="openstack/aodh-0"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.542147 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/067d67d5-6dd0-43b7-ab83-8765ac6f10ac-internal-tls-certs\") pod \"aodh-0\" (UID: \"067d67d5-6dd0-43b7-ab83-8765ac6f10ac\") " pod="openstack/aodh-0"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.542212 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/067d67d5-6dd0-43b7-ab83-8765ac6f10ac-public-tls-certs\") pod \"aodh-0\" (UID: \"067d67d5-6dd0-43b7-ab83-8765ac6f10ac\") " pod="openstack/aodh-0"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.542357 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/067d67d5-6dd0-43b7-ab83-8765ac6f10ac-config-data\") pod \"aodh-0\" (UID: \"067d67d5-6dd0-43b7-ab83-8765ac6f10ac\") " pod="openstack/aodh-0"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.542652 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/067d67d5-6dd0-43b7-ab83-8765ac6f10ac-scripts\") pod \"aodh-0\" (UID: \"067d67d5-6dd0-43b7-ab83-8765ac6f10ac\") " pod="openstack/aodh-0"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.542725 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/067d67d5-6dd0-43b7-ab83-8765ac6f10ac-combined-ca-bundle\") pod \"aodh-0\" (UID: \"067d67d5-6dd0-43b7-ab83-8765ac6f10ac\") " pod="openstack/aodh-0"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.557133 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/067d67d5-6dd0-43b7-ab83-8765ac6f10ac-scripts\") pod \"aodh-0\" (UID: \"067d67d5-6dd0-43b7-ab83-8765ac6f10ac\") " pod="openstack/aodh-0"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.560691 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/067d67d5-6dd0-43b7-ab83-8765ac6f10ac-config-data\") pod \"aodh-0\" (UID: \"067d67d5-6dd0-43b7-ab83-8765ac6f10ac\") " pod="openstack/aodh-0"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.561889 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52ed0309-117b-41b6-bbe8-dd345f306d79" path="/var/lib/kubelet/pods/52ed0309-117b-41b6-bbe8-dd345f306d79/volumes"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.562980 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93184620-a042-499a-bb5b-3d8719a73436" path="/var/lib/kubelet/pods/93184620-a042-499a-bb5b-3d8719a73436/volumes"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.564265 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/067d67d5-6dd0-43b7-ab83-8765ac6f10ac-public-tls-certs\") pod \"aodh-0\" (UID: \"067d67d5-6dd0-43b7-ab83-8765ac6f10ac\") " pod="openstack/aodh-0"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.564759 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/067d67d5-6dd0-43b7-ab83-8765ac6f10ac-internal-tls-certs\") pod \"aodh-0\" (UID: \"067d67d5-6dd0-43b7-ab83-8765ac6f10ac\") " pod="openstack/aodh-0"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.565931 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbrdr\" (UniqueName: \"kubernetes.io/projected/067d67d5-6dd0-43b7-ab83-8765ac6f10ac-kube-api-access-kbrdr\") pod \"aodh-0\" (UID: \"067d67d5-6dd0-43b7-ab83-8765ac6f10ac\") " pod="openstack/aodh-0"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.566764 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/067d67d5-6dd0-43b7-ab83-8765ac6f10ac-combined-ca-bundle\") pod \"aodh-0\" (UID: \"067d67d5-6dd0-43b7-ab83-8765ac6f10ac\") " pod="openstack/aodh-0"
Nov 21 17:24:48 crc kubenswrapper[4967]: I1121 17:24:48.633013 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Nov 21 17:24:49 crc kubenswrapper[4967]: I1121 17:24:49.168180 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Nov 21 17:24:49 crc kubenswrapper[4967]: W1121 17:24:49.177086 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod067d67d5_6dd0_43b7_ab83_8765ac6f10ac.slice/crio-9af47ea1111ae0ab4fbc8768952c991aadc882b7f10d368ba6db0893836169a7 WatchSource:0}: Error finding container 9af47ea1111ae0ab4fbc8768952c991aadc882b7f10d368ba6db0893836169a7: Status 404 returned error can't find the container with id 9af47ea1111ae0ab4fbc8768952c991aadc882b7f10d368ba6db0893836169a7
Nov 21 17:24:49 crc kubenswrapper[4967]: I1121 17:24:49.942248 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"067d67d5-6dd0-43b7-ab83-8765ac6f10ac","Type":"ContainerStarted","Data":"738c07075a90c254455dc34a562bdc9b1d4894e07bcdd65af8164e27cf496242"}
Nov 21 17:24:49 crc kubenswrapper[4967]: I1121 17:24:49.942883 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"067d67d5-6dd0-43b7-ab83-8765ac6f10ac","Type":"ContainerStarted","Data":"9af47ea1111ae0ab4fbc8768952c991aadc882b7f10d368ba6db0893836169a7"}
Nov 21 17:24:51 crc kubenswrapper[4967]: I1121 17:24:51.967763 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"067d67d5-6dd0-43b7-ab83-8765ac6f10ac","Type":"ContainerStarted","Data":"0d89e84bac70376c0f7558b08ec5a7926637a491676dd297dee533b265a7ba05"}
Nov 21 17:24:52 crc kubenswrapper[4967]: I1121 17:24:52.982535 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"067d67d5-6dd0-43b7-ab83-8765ac6f10ac","Type":"ContainerStarted","Data":"a630baf90173de7f57463c3c7c17d43571bab9276cf04cc614f6a44662e0ff19"}
Nov 21 17:24:53 crc kubenswrapper[4967]: I1121 17:24:53.999137 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"067d67d5-6dd0-43b7-ab83-8765ac6f10ac","Type":"ContainerStarted","Data":"934f7ac1f90a3eee5fa9676a89e642de1945c2893cee20f26d650bd3cedba48a"}
Nov 21 17:24:54 crc kubenswrapper[4967]: I1121 17:24:54.026581 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=1.934184515 podStartE2EDuration="6.026559969s" podCreationTimestamp="2025-11-21 17:24:48 +0000 UTC" firstStartedPulling="2025-11-21 17:24:49.180148408 +0000 UTC m=+6577.438669416" lastFinishedPulling="2025-11-21 17:24:53.272523862 +0000 UTC m=+6581.531044870" observedRunningTime="2025-11-21 17:24:54.022703269 +0000 UTC m=+6582.281224277" watchObservedRunningTime="2025-11-21 17:24:54.026559969 +0000 UTC m=+6582.285080977"
Nov 21 17:24:54 crc kubenswrapper[4967]: E1121 17:24:54.556075 4967 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d018f81_8caf_43f2_8ce6_5a799aacde0d.slice/crio-cc31cf1fb3a858eeda7bd10eebe0fe4cc860411e5244200af94e3e6116c09bdd.scope\": RecentStats: unable to find data in memory cache]"
Nov 21 17:24:55 crc kubenswrapper[4967]: I1121 17:24:55.536933 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b"
Nov 21 17:24:55 crc kubenswrapper[4967]: E1121 17:24:55.537606 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:24:56 crc kubenswrapper[4967]: E1121 17:24:56.864134 4967 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d018f81_8caf_43f2_8ce6_5a799aacde0d.slice/crio-cc31cf1fb3a858eeda7bd10eebe0fe4cc860411e5244200af94e3e6116c09bdd.scope\": RecentStats: unable to find data in memory cache]"
Nov 21 17:25:06 crc kubenswrapper[4967]: E1121 17:25:06.921222 4967 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d018f81_8caf_43f2_8ce6_5a799aacde0d.slice/crio-cc31cf1fb3a858eeda7bd10eebe0fe4cc860411e5244200af94e3e6116c09bdd.scope\": RecentStats: unable to find data in memory cache]"
Nov 21 17:25:07 crc kubenswrapper[4967]: I1121 17:25:07.537130 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b"
Nov 21 17:25:07 crc kubenswrapper[4967]: E1121 17:25:07.538276 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:25:09 crc kubenswrapper[4967]: E1121 17:25:09.481712 4967 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d018f81_8caf_43f2_8ce6_5a799aacde0d.slice/crio-cc31cf1fb3a858eeda7bd10eebe0fe4cc860411e5244200af94e3e6116c09bdd.scope\": RecentStats: unable to find data in memory cache]"
Nov 21 17:25:19 crc kubenswrapper[4967]: I1121 17:25:19.536830 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b"
Nov 21 17:25:19 crc kubenswrapper[4967]: E1121 17:25:19.537989 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
E1121 17:25:19.537989 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:25:30 crc kubenswrapper[4967]: I1121 17:25:30.537455 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b" Nov 21 17:25:30 crc kubenswrapper[4967]: E1121 17:25:30.539999 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:25:42 crc kubenswrapper[4967]: I1121 17:25:42.546945 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b" Nov 21 17:25:42 crc kubenswrapper[4967]: E1121 17:25:42.550953 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:25:54 crc kubenswrapper[4967]: I1121 17:25:54.536098 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b" Nov 21 17:25:54 crc kubenswrapper[4967]: E1121 17:25:54.536860 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:26:05 crc kubenswrapper[4967]: I1121 17:26:05.537054 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b" Nov 21 17:26:05 crc kubenswrapper[4967]: E1121 17:26:05.537923 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:26:18 crc kubenswrapper[4967]: I1121 17:26:18.536139 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b" Nov 21 17:26:18 crc kubenswrapper[4967]: E1121 17:26:18.537855 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting 
Nov 21 17:26:30 crc kubenswrapper[4967]: I1121 17:26:30.536204 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b"
Nov 21 17:26:30 crc kubenswrapper[4967]: E1121 17:26:30.537241 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:26:43 crc kubenswrapper[4967]: I1121 17:26:43.536902 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b"
Nov 21 17:26:43 crc kubenswrapper[4967]: E1121 17:26:43.537781 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:26:55 crc kubenswrapper[4967]: I1121 17:26:55.537090 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b"
Nov 21 17:26:55 crc kubenswrapper[4967]: E1121 17:26:55.538768 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:27:06 crc kubenswrapper[4967]: I1121 17:27:06.536129 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b"
Nov 21 17:27:06 crc kubenswrapper[4967]: E1121 17:27:06.537939 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:27:21 crc kubenswrapper[4967]: I1121 17:27:21.536197 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b"
Nov 21 17:27:21 crc kubenswrapper[4967]: E1121 17:27:21.537056 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:27:35 crc kubenswrapper[4967]: I1121 17:27:35.536045 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b"
Nov 21 17:27:35 crc kubenswrapper[4967]: E1121 17:27:35.536894 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:27:45 crc kubenswrapper[4967]: I1121 17:27:45.349657 4967 scope.go:117] "RemoveContainer" containerID="a241fdbe78b40644cb12fb6ed0d31bb1e1d98a74d69c67d0bd44735954269b25"
Nov 21 17:27:45 crc kubenswrapper[4967]: I1121 17:27:45.421211 4967 scope.go:117] "RemoveContainer" containerID="7c3991ab8171407c256c85cb9efabefad310e585deaa2c6ba069e22fc580dd0b"
Nov 21 17:27:45 crc kubenswrapper[4967]: I1121 17:27:45.478645 4967 scope.go:117] "RemoveContainer" containerID="e3d966b9db60bf1ecefa006399cb7e861796eb9a4b333994e2d6346de6e018eb"
Nov 21 17:27:47 crc kubenswrapper[4967]: I1121 17:27:47.536844 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b"
Nov 21 17:27:47 crc kubenswrapper[4967]: E1121 17:27:47.537562 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:27:59 crc kubenswrapper[4967]: I1121 17:27:59.536857 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b"
Nov 21 17:27:59 crc kubenswrapper[4967]: E1121 17:27:59.537658 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:28:10 crc kubenswrapper[4967]: I1121 17:28:10.536516 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b"
Nov 21 17:28:10 crc kubenswrapper[4967]: E1121 17:28:10.537337 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:28:23 crc kubenswrapper[4967]: I1121 17:28:23.537135 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b"
Nov 21 17:28:24 crc kubenswrapper[4967]: I1121 17:28:24.709262 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"98d7074e2f611728e5a01ed6ef34ab49d8ea79c2581f7fcc491b941813737527"}
Nov 21 17:30:00 crc kubenswrapper[4967]: I1121 17:30:00.311205 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r"]
Nov 21 17:30:00 crc kubenswrapper[4967]: I1121 17:30:00.315838 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r"
Nov 21 17:30:00 crc kubenswrapper[4967]: I1121 17:30:00.319084 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 21 17:30:00 crc kubenswrapper[4967]: I1121 17:30:00.320791 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 21 17:30:00 crc kubenswrapper[4967]: I1121 17:30:00.355653 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r"]
Nov 21 17:30:00 crc kubenswrapper[4967]: I1121 17:30:00.426510 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tw2d\" (UniqueName: \"kubernetes.io/projected/edf86dca-b83f-4527-96fc-e830c1d60bfa-kube-api-access-2tw2d\") pod \"collect-profiles-29395770-56q2r\" (UID: \"edf86dca-b83f-4527-96fc-e830c1d60bfa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r"
Nov 21 17:30:00 crc kubenswrapper[4967]: I1121 17:30:00.426883 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/edf86dca-b83f-4527-96fc-e830c1d60bfa-config-volume\") pod \"collect-profiles-29395770-56q2r\" (UID: \"edf86dca-b83f-4527-96fc-e830c1d60bfa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r"
Nov 21 17:30:00 crc kubenswrapper[4967]: I1121 17:30:00.427141 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/edf86dca-b83f-4527-96fc-e830c1d60bfa-secret-volume\") pod \"collect-profiles-29395770-56q2r\" (UID: \"edf86dca-b83f-4527-96fc-e830c1d60bfa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r"
Nov 21 17:30:00 crc kubenswrapper[4967]: I1121 17:30:00.530727 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/edf86dca-b83f-4527-96fc-e830c1d60bfa-config-volume\") pod \"collect-profiles-29395770-56q2r\" (UID: \"edf86dca-b83f-4527-96fc-e830c1d60bfa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r"
Nov 21 17:30:00 crc kubenswrapper[4967]: I1121 17:30:00.530996 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/edf86dca-b83f-4527-96fc-e830c1d60bfa-secret-volume\") pod \"collect-profiles-29395770-56q2r\" (UID: \"edf86dca-b83f-4527-96fc-e830c1d60bfa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r"
Nov 21 17:30:00 crc kubenswrapper[4967]: I1121 17:30:00.531162 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2tw2d\" (UniqueName: \"kubernetes.io/projected/edf86dca-b83f-4527-96fc-e830c1d60bfa-kube-api-access-2tw2d\") pod \"collect-profiles-29395770-56q2r\" (UID: \"edf86dca-b83f-4527-96fc-e830c1d60bfa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r"
Nov 21 17:30:00 crc kubenswrapper[4967]: I1121 17:30:00.531838 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/edf86dca-b83f-4527-96fc-e830c1d60bfa-config-volume\") pod \"collect-profiles-29395770-56q2r\" (UID: \"edf86dca-b83f-4527-96fc-e830c1d60bfa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r"
Nov 21 17:30:00 crc kubenswrapper[4967]: I1121 17:30:00.540438 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/edf86dca-b83f-4527-96fc-e830c1d60bfa-secret-volume\") pod \"collect-profiles-29395770-56q2r\" (UID: \"edf86dca-b83f-4527-96fc-e830c1d60bfa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r"
Nov 21 17:30:00 crc kubenswrapper[4967]: I1121 17:30:00.556013 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2tw2d\" (UniqueName: \"kubernetes.io/projected/edf86dca-b83f-4527-96fc-e830c1d60bfa-kube-api-access-2tw2d\") pod \"collect-profiles-29395770-56q2r\" (UID: \"edf86dca-b83f-4527-96fc-e830c1d60bfa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r"
Nov 21 17:30:00 crc kubenswrapper[4967]: I1121 17:30:00.644687 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r"
Nov 21 17:30:01 crc kubenswrapper[4967]: I1121 17:30:01.520817 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r"]
Nov 21 17:30:02 crc kubenswrapper[4967]: I1121 17:30:02.060881 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r" event={"ID":"edf86dca-b83f-4527-96fc-e830c1d60bfa","Type":"ContainerStarted","Data":"46b3061e8a20288c95ee7ab1357d3775de3ff8151c8af7c432b91cfafdeb9a85"}
Nov 21 17:30:02 crc kubenswrapper[4967]: I1121 17:30:02.060975 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r" event={"ID":"edf86dca-b83f-4527-96fc-e830c1d60bfa","Type":"ContainerStarted","Data":"a4db608bd655783ff849b2b820cecd3e2e3cae185739d7d89f2a14dc01c5f9ec"}
Nov 21 17:30:02 crc kubenswrapper[4967]: I1121 17:30:02.100476 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r" podStartSLOduration=2.100440816 podStartE2EDuration="2.100440816s" podCreationTimestamp="2025-11-21 17:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 17:30:02.090114691 +0000 UTC m=+6890.348635739" watchObservedRunningTime="2025-11-21 17:30:02.100440816 +0000 UTC m=+6890.358961854"
Nov 21 17:30:03 crc kubenswrapper[4967]: I1121 17:30:03.089498 4967 generic.go:334] "Generic (PLEG): container finished" podID="edf86dca-b83f-4527-96fc-e830c1d60bfa" containerID="46b3061e8a20288c95ee7ab1357d3775de3ff8151c8af7c432b91cfafdeb9a85" exitCode=0
containerID="46b3061e8a20288c95ee7ab1357d3775de3ff8151c8af7c432b91cfafdeb9a85" exitCode=0 Nov 21 17:30:03 crc kubenswrapper[4967]: I1121 17:30:03.091045 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r" event={"ID":"edf86dca-b83f-4527-96fc-e830c1d60bfa","Type":"ContainerDied","Data":"46b3061e8a20288c95ee7ab1357d3775de3ff8151c8af7c432b91cfafdeb9a85"} Nov 21 17:30:04 crc kubenswrapper[4967]: I1121 17:30:04.663105 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r" Nov 21 17:30:04 crc kubenswrapper[4967]: I1121 17:30:04.694278 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/edf86dca-b83f-4527-96fc-e830c1d60bfa-config-volume\") pod \"edf86dca-b83f-4527-96fc-e830c1d60bfa\" (UID: \"edf86dca-b83f-4527-96fc-e830c1d60bfa\") " Nov 21 17:30:04 crc kubenswrapper[4967]: I1121 17:30:04.695210 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2tw2d\" (UniqueName: \"kubernetes.io/projected/edf86dca-b83f-4527-96fc-e830c1d60bfa-kube-api-access-2tw2d\") pod \"edf86dca-b83f-4527-96fc-e830c1d60bfa\" (UID: \"edf86dca-b83f-4527-96fc-e830c1d60bfa\") " Nov 21 17:30:04 crc kubenswrapper[4967]: I1121 17:30:04.695438 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/edf86dca-b83f-4527-96fc-e830c1d60bfa-secret-volume\") pod \"edf86dca-b83f-4527-96fc-e830c1d60bfa\" (UID: \"edf86dca-b83f-4527-96fc-e830c1d60bfa\") " Nov 21 17:30:04 crc kubenswrapper[4967]: I1121 17:30:04.695850 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edf86dca-b83f-4527-96fc-e830c1d60bfa-config-volume" (OuterVolumeSpecName: "config-volume") pod "edf86dca-b83f-4527-96fc-e830c1d60bfa" (UID: "edf86dca-b83f-4527-96fc-e830c1d60bfa"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 17:30:04 crc kubenswrapper[4967]: I1121 17:30:04.696560 4967 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/edf86dca-b83f-4527-96fc-e830c1d60bfa-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 17:30:04 crc kubenswrapper[4967]: I1121 17:30:04.706215 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/edf86dca-b83f-4527-96fc-e830c1d60bfa-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "edf86dca-b83f-4527-96fc-e830c1d60bfa" (UID: "edf86dca-b83f-4527-96fc-e830c1d60bfa"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 17:30:04 crc kubenswrapper[4967]: I1121 17:30:04.714002 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edf86dca-b83f-4527-96fc-e830c1d60bfa-kube-api-access-2tw2d" (OuterVolumeSpecName: "kube-api-access-2tw2d") pod "edf86dca-b83f-4527-96fc-e830c1d60bfa" (UID: "edf86dca-b83f-4527-96fc-e830c1d60bfa"). InnerVolumeSpecName "kube-api-access-2tw2d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:30:04 crc kubenswrapper[4967]: I1121 17:30:04.800615 4967 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/edf86dca-b83f-4527-96fc-e830c1d60bfa-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 17:30:04 crc kubenswrapper[4967]: I1121 17:30:04.800660 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2tw2d\" (UniqueName: \"kubernetes.io/projected/edf86dca-b83f-4527-96fc-e830c1d60bfa-kube-api-access-2tw2d\") on node \"crc\" DevicePath \"\"" Nov 21 17:30:05 crc kubenswrapper[4967]: I1121 17:30:05.134254 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r" event={"ID":"edf86dca-b83f-4527-96fc-e830c1d60bfa","Type":"ContainerDied","Data":"a4db608bd655783ff849b2b820cecd3e2e3cae185739d7d89f2a14dc01c5f9ec"} Nov 21 17:30:05 crc kubenswrapper[4967]: I1121 17:30:05.134691 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4db608bd655783ff849b2b820cecd3e2e3cae185739d7d89f2a14dc01c5f9ec" Nov 21 17:30:05 crc kubenswrapper[4967]: I1121 17:30:05.134343 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395770-56q2r" Nov 21 17:30:05 crc kubenswrapper[4967]: I1121 17:30:05.788183 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp"] Nov 21 17:30:05 crc kubenswrapper[4967]: I1121 17:30:05.799702 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395725-rw8dp"] Nov 21 17:30:06 crc kubenswrapper[4967]: I1121 17:30:06.569147 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06225c23-8c36-469d-93c0-61f838f3afb1" path="/var/lib/kubelet/pods/06225c23-8c36-469d-93c0-61f838f3afb1/volumes" Nov 21 17:30:45 crc kubenswrapper[4967]: I1121 17:30:45.618845 4967 scope.go:117] "RemoveContainer" containerID="d37d188eadefc9265d1ab36395fed01e7d9a39282cb8f886bbd8fbdff4398084" Nov 21 17:30:46 crc kubenswrapper[4967]: I1121 17:30:46.522417 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:30:46 crc kubenswrapper[4967]: I1121 17:30:46.522524 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:30:56 crc kubenswrapper[4967]: I1121 17:30:56.305827 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-9vbfq"] Nov 21 17:30:56 crc kubenswrapper[4967]: E1121 17:30:56.306988 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edf86dca-b83f-4527-96fc-e830c1d60bfa" containerName="collect-profiles" Nov 21 17:30:56 crc kubenswrapper[4967]: I1121 17:30:56.307002 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="edf86dca-b83f-4527-96fc-e830c1d60bfa" containerName="collect-profiles" Nov 21 17:30:56 crc kubenswrapper[4967]: 
Nov 21 17:30:56 crc kubenswrapper[4967]: I1121 17:30:56.307253 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="edf86dca-b83f-4527-96fc-e830c1d60bfa" containerName="collect-profiles"
Nov 21 17:30:56 crc kubenswrapper[4967]: I1121 17:30:56.316493 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9vbfq"
Nov 21 17:30:56 crc kubenswrapper[4967]: I1121 17:30:56.332293 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d88dc4f-557e-4eb9-913c-f2eaa631f8a6-utilities\") pod \"redhat-marketplace-9vbfq\" (UID: \"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6\") " pod="openshift-marketplace/redhat-marketplace-9vbfq"
Nov 21 17:30:56 crc kubenswrapper[4967]: I1121 17:30:56.332374 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d88dc4f-557e-4eb9-913c-f2eaa631f8a6-catalog-content\") pod \"redhat-marketplace-9vbfq\" (UID: \"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6\") " pod="openshift-marketplace/redhat-marketplace-9vbfq"
Nov 21 17:30:56 crc kubenswrapper[4967]: I1121 17:30:56.332418 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2dgj\" (UniqueName: \"kubernetes.io/projected/4d88dc4f-557e-4eb9-913c-f2eaa631f8a6-kube-api-access-k2dgj\") pod \"redhat-marketplace-9vbfq\" (UID: \"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6\") " pod="openshift-marketplace/redhat-marketplace-9vbfq"
Nov 21 17:30:56 crc kubenswrapper[4967]: I1121 17:30:56.338724 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9vbfq"]
Nov 21 17:30:56 crc kubenswrapper[4967]: I1121 17:30:56.435164 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d88dc4f-557e-4eb9-913c-f2eaa631f8a6-utilities\") pod \"redhat-marketplace-9vbfq\" (UID: \"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6\") " pod="openshift-marketplace/redhat-marketplace-9vbfq"
Nov 21 17:30:56 crc kubenswrapper[4967]: I1121 17:30:56.435273 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d88dc4f-557e-4eb9-913c-f2eaa631f8a6-catalog-content\") pod \"redhat-marketplace-9vbfq\" (UID: \"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6\") " pod="openshift-marketplace/redhat-marketplace-9vbfq"
Nov 21 17:30:56 crc kubenswrapper[4967]: I1121 17:30:56.435326 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2dgj\" (UniqueName: \"kubernetes.io/projected/4d88dc4f-557e-4eb9-913c-f2eaa631f8a6-kube-api-access-k2dgj\") pod \"redhat-marketplace-9vbfq\" (UID: \"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6\") " pod="openshift-marketplace/redhat-marketplace-9vbfq"
Nov 21 17:30:56 crc kubenswrapper[4967]: I1121 17:30:56.435877 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d88dc4f-557e-4eb9-913c-f2eaa631f8a6-utilities\") pod \"redhat-marketplace-9vbfq\" (UID: \"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6\") " pod="openshift-marketplace/redhat-marketplace-9vbfq"
Nov 21 17:30:56 crc kubenswrapper[4967]: I1121 17:30:56.435935 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d88dc4f-557e-4eb9-913c-f2eaa631f8a6-catalog-content\") pod \"redhat-marketplace-9vbfq\" (UID: \"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6\") " pod="openshift-marketplace/redhat-marketplace-9vbfq"
Nov 21 17:30:56 crc kubenswrapper[4967]: I1121 17:30:56.460245 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2dgj\" (UniqueName: \"kubernetes.io/projected/4d88dc4f-557e-4eb9-913c-f2eaa631f8a6-kube-api-access-k2dgj\") pod \"redhat-marketplace-9vbfq\" (UID: \"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6\") " pod="openshift-marketplace/redhat-marketplace-9vbfq"
Nov 21 17:30:56 crc kubenswrapper[4967]: I1121 17:30:56.635433 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9vbfq"
Nov 21 17:30:57 crc kubenswrapper[4967]: I1121 17:30:57.134403 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9vbfq"]
Nov 21 17:30:57 crc kubenswrapper[4967]: I1121 17:30:57.889758 4967 generic.go:334] "Generic (PLEG): container finished" podID="4d88dc4f-557e-4eb9-913c-f2eaa631f8a6" containerID="2d39ff83fae9a0c4d9b0aeb47dc8c950ae70d3fda76b42ccefdf72e649fcad9c" exitCode=0
Nov 21 17:30:57 crc kubenswrapper[4967]: I1121 17:30:57.890122 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9vbfq" event={"ID":"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6","Type":"ContainerDied","Data":"2d39ff83fae9a0c4d9b0aeb47dc8c950ae70d3fda76b42ccefdf72e649fcad9c"}
Nov 21 17:30:57 crc kubenswrapper[4967]: I1121 17:30:57.890162 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9vbfq" event={"ID":"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6","Type":"ContainerStarted","Data":"8ec21259caa17de728f62715390d21e9312fcdf01c9916db52e8b9339dc1bfc4"}
Nov 21 17:30:57 crc kubenswrapper[4967]: I1121 17:30:57.897802 4967 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 21 17:30:59 crc kubenswrapper[4967]: I1121 17:30:59.921186 4967 generic.go:334] "Generic (PLEG): container finished" podID="4d88dc4f-557e-4eb9-913c-f2eaa631f8a6" containerID="630f12f01dae6787c2aaf27cf3712babece44ab8bfb45f48645187fbd7b44c93" exitCode=0
Nov 21 17:30:59 crc kubenswrapper[4967]: I1121 17:30:59.921584 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9vbfq" event={"ID":"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6","Type":"ContainerDied","Data":"630f12f01dae6787c2aaf27cf3712babece44ab8bfb45f48645187fbd7b44c93"}
Nov 21 17:31:00 crc kubenswrapper[4967]: I1121 17:31:00.968759 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9vbfq" event={"ID":"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6","Type":"ContainerStarted","Data":"a1f2c0718ffd9ff19c9fa95bc86cf61a69b483d36c32fffafa501eb117f20f43"}
Nov 21 17:31:01 crc kubenswrapper[4967]: I1121 17:31:01.012632 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-9vbfq" podStartSLOduration=2.556759566 podStartE2EDuration="5.012609118s" podCreationTimestamp="2025-11-21 17:30:56 +0000 UTC" firstStartedPulling="2025-11-21 17:30:57.893805669 +0000 UTC m=+6946.152326717" lastFinishedPulling="2025-11-21 17:31:00.349655251 +0000 UTC m=+6948.608176269" observedRunningTime="2025-11-21 17:31:01.008356176 +0000 UTC m=+6949.266877184" watchObservedRunningTime="2025-11-21 17:31:01.012609118 +0000 UTC m=+6949.271130126"
Nov 21 17:31:06 crc kubenswrapper[4967]: I1121 17:31:06.635658 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-9vbfq"
Nov 21 17:31:06 crc kubenswrapper[4967]: I1121 17:31:06.636299 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-9vbfq"
Nov 21 17:31:06 crc kubenswrapper[4967]: I1121 17:31:06.690178 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-9vbfq"
Nov 21 17:31:07 crc kubenswrapper[4967]: I1121 17:31:07.109297 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-9vbfq"
Nov 21 17:31:07 crc kubenswrapper[4967]: I1121 17:31:07.168298 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9vbfq"]
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.091155 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-9vbfq" podUID="4d88dc4f-557e-4eb9-913c-f2eaa631f8a6" containerName="registry-server" containerID="cri-o://a1f2c0718ffd9ff19c9fa95bc86cf61a69b483d36c32fffafa501eb117f20f43" gracePeriod=2
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.385269 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vrw7c"]
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.390621 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vrw7c"
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.397744 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vrw7c"]
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.407172 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d-utilities\") pod \"certified-operators-vrw7c\" (UID: \"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d\") " pod="openshift-marketplace/certified-operators-vrw7c"
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.407632 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plf5b\" (UniqueName: \"kubernetes.io/projected/d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d-kube-api-access-plf5b\") pod \"certified-operators-vrw7c\" (UID: \"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d\") " pod="openshift-marketplace/certified-operators-vrw7c"
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.407711 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d-catalog-content\") pod \"certified-operators-vrw7c\" (UID: \"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d\") " pod="openshift-marketplace/certified-operators-vrw7c"
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.513346 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plf5b\" (UniqueName: \"kubernetes.io/projected/d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d-kube-api-access-plf5b\") pod \"certified-operators-vrw7c\" (UID: \"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d\") " pod="openshift-marketplace/certified-operators-vrw7c"
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.514168 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d-catalog-content\") pod \"certified-operators-vrw7c\" (UID: \"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d\") " pod="openshift-marketplace/certified-operators-vrw7c"
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.514732 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d-utilities\") pod \"certified-operators-vrw7c\" (UID: \"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d\") " pod="openshift-marketplace/certified-operators-vrw7c"
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.515549 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d-catalog-content\") pod \"certified-operators-vrw7c\" (UID: \"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d\") " pod="openshift-marketplace/certified-operators-vrw7c"
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.515733 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d-utilities\") pod \"certified-operators-vrw7c\" (UID: \"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d\") " pod="openshift-marketplace/certified-operators-vrw7c"
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.538866 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-plf5b\" (UniqueName: \"kubernetes.io/projected/d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d-kube-api-access-plf5b\") pod \"certified-operators-vrw7c\" (UID: \"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d\") " pod="openshift-marketplace/certified-operators-vrw7c"
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.730317 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vrw7c"
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.839697 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9vbfq"
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.925379 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d88dc4f-557e-4eb9-913c-f2eaa631f8a6-catalog-content\") pod \"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6\" (UID: \"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6\") "
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.927658 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d88dc4f-557e-4eb9-913c-f2eaa631f8a6-utilities\") pod \"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6\" (UID: \"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6\") "
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.927794 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k2dgj\" (UniqueName: \"kubernetes.io/projected/4d88dc4f-557e-4eb9-913c-f2eaa631f8a6-kube-api-access-k2dgj\") pod \"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6\" (UID: \"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6\") "
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.930287 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d88dc4f-557e-4eb9-913c-f2eaa631f8a6-utilities" (OuterVolumeSpecName: "utilities") pod "4d88dc4f-557e-4eb9-913c-f2eaa631f8a6" (UID: "4d88dc4f-557e-4eb9-913c-f2eaa631f8a6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.937160 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d88dc4f-557e-4eb9-913c-f2eaa631f8a6-kube-api-access-k2dgj" (OuterVolumeSpecName: "kube-api-access-k2dgj") pod "4d88dc4f-557e-4eb9-913c-f2eaa631f8a6" (UID: "4d88dc4f-557e-4eb9-913c-f2eaa631f8a6"). InnerVolumeSpecName "kube-api-access-k2dgj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 17:31:09 crc kubenswrapper[4967]: I1121 17:31:09.952362 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d88dc4f-557e-4eb9-913c-f2eaa631f8a6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4d88dc4f-557e-4eb9-913c-f2eaa631f8a6" (UID: "4d88dc4f-557e-4eb9-913c-f2eaa631f8a6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.032953 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d88dc4f-557e-4eb9-913c-f2eaa631f8a6-utilities\") on node \"crc\" DevicePath \"\""
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.032992 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k2dgj\" (UniqueName: \"kubernetes.io/projected/4d88dc4f-557e-4eb9-913c-f2eaa631f8a6-kube-api-access-k2dgj\") on node \"crc\" DevicePath \"\""
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.033007 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d88dc4f-557e-4eb9-913c-f2eaa631f8a6-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.106298 4967 generic.go:334] "Generic (PLEG): container finished" podID="4d88dc4f-557e-4eb9-913c-f2eaa631f8a6" containerID="a1f2c0718ffd9ff19c9fa95bc86cf61a69b483d36c32fffafa501eb117f20f43" exitCode=0
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.106388 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9vbfq" event={"ID":"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6","Type":"ContainerDied","Data":"a1f2c0718ffd9ff19c9fa95bc86cf61a69b483d36c32fffafa501eb117f20f43"}
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.106430 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9vbfq" event={"ID":"4d88dc4f-557e-4eb9-913c-f2eaa631f8a6","Type":"ContainerDied","Data":"8ec21259caa17de728f62715390d21e9312fcdf01c9916db52e8b9339dc1bfc4"}
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.106429 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9vbfq"
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.106453 4967 scope.go:117] "RemoveContainer" containerID="a1f2c0718ffd9ff19c9fa95bc86cf61a69b483d36c32fffafa501eb117f20f43"
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.145117 4967 scope.go:117] "RemoveContainer" containerID="630f12f01dae6787c2aaf27cf3712babece44ab8bfb45f48645187fbd7b44c93"
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.153894 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9vbfq"]
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.171342 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-9vbfq"]
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.177074 4967 scope.go:117] "RemoveContainer" containerID="2d39ff83fae9a0c4d9b0aeb47dc8c950ae70d3fda76b42ccefdf72e649fcad9c"
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.202341 4967 scope.go:117] "RemoveContainer" containerID="a1f2c0718ffd9ff19c9fa95bc86cf61a69b483d36c32fffafa501eb117f20f43"
Nov 21 17:31:10 crc kubenswrapper[4967]: E1121 17:31:10.204156 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1f2c0718ffd9ff19c9fa95bc86cf61a69b483d36c32fffafa501eb117f20f43\": container with ID starting with a1f2c0718ffd9ff19c9fa95bc86cf61a69b483d36c32fffafa501eb117f20f43 not found: ID does not exist" containerID="a1f2c0718ffd9ff19c9fa95bc86cf61a69b483d36c32fffafa501eb117f20f43"
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.204279 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1f2c0718ffd9ff19c9fa95bc86cf61a69b483d36c32fffafa501eb117f20f43"} err="failed to get container status \"a1f2c0718ffd9ff19c9fa95bc86cf61a69b483d36c32fffafa501eb117f20f43\": rpc error: code = NotFound desc = could not find container \"a1f2c0718ffd9ff19c9fa95bc86cf61a69b483d36c32fffafa501eb117f20f43\": container with ID starting with a1f2c0718ffd9ff19c9fa95bc86cf61a69b483d36c32fffafa501eb117f20f43 not found: ID does not exist"
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.204327 4967 scope.go:117] "RemoveContainer" containerID="630f12f01dae6787c2aaf27cf3712babece44ab8bfb45f48645187fbd7b44c93"
Nov 21 17:31:10 crc kubenswrapper[4967]: E1121 17:31:10.204801 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"630f12f01dae6787c2aaf27cf3712babece44ab8bfb45f48645187fbd7b44c93\": container with ID starting with 630f12f01dae6787c2aaf27cf3712babece44ab8bfb45f48645187fbd7b44c93 not found: ID does not exist" containerID="630f12f01dae6787c2aaf27cf3712babece44ab8bfb45f48645187fbd7b44c93"
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.204868 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"630f12f01dae6787c2aaf27cf3712babece44ab8bfb45f48645187fbd7b44c93"} err="failed to get container status \"630f12f01dae6787c2aaf27cf3712babece44ab8bfb45f48645187fbd7b44c93\": rpc error: code = NotFound desc = could not find container \"630f12f01dae6787c2aaf27cf3712babece44ab8bfb45f48645187fbd7b44c93\": container with ID starting with 630f12f01dae6787c2aaf27cf3712babece44ab8bfb45f48645187fbd7b44c93 not found: ID does not exist"
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.204916 4967 scope.go:117] "RemoveContainer" containerID="2d39ff83fae9a0c4d9b0aeb47dc8c950ae70d3fda76b42ccefdf72e649fcad9c"
Nov 21 17:31:10 crc kubenswrapper[4967]: E1121 17:31:10.205341 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d39ff83fae9a0c4d9b0aeb47dc8c950ae70d3fda76b42ccefdf72e649fcad9c\": container with ID starting with 2d39ff83fae9a0c4d9b0aeb47dc8c950ae70d3fda76b42ccefdf72e649fcad9c not found: ID does not exist" containerID="2d39ff83fae9a0c4d9b0aeb47dc8c950ae70d3fda76b42ccefdf72e649fcad9c"
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.205375 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d39ff83fae9a0c4d9b0aeb47dc8c950ae70d3fda76b42ccefdf72e649fcad9c"} err="failed to get container status \"2d39ff83fae9a0c4d9b0aeb47dc8c950ae70d3fda76b42ccefdf72e649fcad9c\": rpc error: code = NotFound desc = could not find container \"2d39ff83fae9a0c4d9b0aeb47dc8c950ae70d3fda76b42ccefdf72e649fcad9c\": container with ID starting with 2d39ff83fae9a0c4d9b0aeb47dc8c950ae70d3fda76b42ccefdf72e649fcad9c not found: ID does not exist"
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.282018 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vrw7c"]
Nov 21 17:31:10 crc kubenswrapper[4967]: I1121 17:31:10.552267 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d88dc4f-557e-4eb9-913c-f2eaa631f8a6" path="/var/lib/kubelet/pods/4d88dc4f-557e-4eb9-913c-f2eaa631f8a6/volumes"
Nov 21 17:31:11 crc kubenswrapper[4967]: I1121 17:31:11.123933 4967 generic.go:334] "Generic (PLEG): container finished" podID="d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d" containerID="2f79bfbc68e2353c805c20bb0c209cc60c06814733d2ea21548eb8550305122a" exitCode=0
Nov 21 17:31:11 crc kubenswrapper[4967]: I1121 17:31:11.124012 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrw7c" event={"ID":"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d","Type":"ContainerDied","Data":"2f79bfbc68e2353c805c20bb0c209cc60c06814733d2ea21548eb8550305122a"}
Nov 21 17:31:11 crc kubenswrapper[4967]: I1121 17:31:11.124479 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrw7c" event={"ID":"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d","Type":"ContainerStarted","Data":"adf3d66df1619991ccb9e7e19fa7d11d0db515efba1ec6b79d041d43c9761c23"}
Nov 21 17:31:12 crc kubenswrapper[4967]: I1121 17:31:12.140915 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrw7c" event={"ID":"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d","Type":"ContainerStarted","Data":"dfdd83326cf95b21d28b0cee0f3e2267ef4d94ec8ed753e973f03cc3ded66972"}
Nov 21 17:31:13 crc kubenswrapper[4967]: I1121 17:31:13.155131 4967 generic.go:334] "Generic (PLEG): container finished" podID="d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d" containerID="dfdd83326cf95b21d28b0cee0f3e2267ef4d94ec8ed753e973f03cc3ded66972" exitCode=0
Nov 21 17:31:13 crc kubenswrapper[4967]: I1121 17:31:13.155248 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrw7c" event={"ID":"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d","Type":"ContainerDied","Data":"dfdd83326cf95b21d28b0cee0f3e2267ef4d94ec8ed753e973f03cc3ded66972"}
Nov 21 17:31:14 crc kubenswrapper[4967]: I1121 17:31:14.170191 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrw7c" event={"ID":"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d","Type":"ContainerStarted","Data":"71d762dc7f363ea6c50e93934d0fbcefd6fab9f84041fb90b1ecac98ec0027a3"}
event={"ID":"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d","Type":"ContainerStarted","Data":"71d762dc7f363ea6c50e93934d0fbcefd6fab9f84041fb90b1ecac98ec0027a3"} Nov 21 17:31:14 crc kubenswrapper[4967]: I1121 17:31:14.197591 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vrw7c" podStartSLOduration=2.787943637 podStartE2EDuration="5.197571023s" podCreationTimestamp="2025-11-21 17:31:09 +0000 UTC" firstStartedPulling="2025-11-21 17:31:11.127820009 +0000 UTC m=+6959.386341047" lastFinishedPulling="2025-11-21 17:31:13.537447425 +0000 UTC m=+6961.795968433" observedRunningTime="2025-11-21 17:31:14.188803112 +0000 UTC m=+6962.447324120" watchObservedRunningTime="2025-11-21 17:31:14.197571023 +0000 UTC m=+6962.456092021" Nov 21 17:31:16 crc kubenswrapper[4967]: I1121 17:31:16.522497 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:31:16 crc kubenswrapper[4967]: I1121 17:31:16.522911 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:31:19 crc kubenswrapper[4967]: I1121 17:31:19.730892 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vrw7c" Nov 21 17:31:19 crc kubenswrapper[4967]: I1121 17:31:19.731734 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vrw7c" Nov 21 17:31:19 crc kubenswrapper[4967]: I1121 17:31:19.789030 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vrw7c" Nov 21 17:31:20 crc kubenswrapper[4967]: I1121 17:31:20.331210 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vrw7c" Nov 21 17:31:20 crc kubenswrapper[4967]: I1121 17:31:20.404104 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vrw7c"] Nov 21 17:31:22 crc kubenswrapper[4967]: I1121 17:31:22.264268 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-vrw7c" podUID="d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d" containerName="registry-server" containerID="cri-o://71d762dc7f363ea6c50e93934d0fbcefd6fab9f84041fb90b1ecac98ec0027a3" gracePeriod=2 Nov 21 17:31:22 crc kubenswrapper[4967]: I1121 17:31:22.812124 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vrw7c" Nov 21 17:31:22 crc kubenswrapper[4967]: I1121 17:31:22.912889 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d-utilities\") pod \"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d\" (UID: \"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d\") " Nov 21 17:31:22 crc kubenswrapper[4967]: I1121 17:31:22.913253 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-plf5b\" (UniqueName: \"kubernetes.io/projected/d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d-kube-api-access-plf5b\") pod \"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d\" (UID: \"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d\") " Nov 21 17:31:22 crc kubenswrapper[4967]: I1121 17:31:22.913307 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d-catalog-content\") pod \"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d\" (UID: \"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d\") " Nov 21 17:31:22 crc kubenswrapper[4967]: I1121 17:31:22.914300 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d-utilities" (OuterVolumeSpecName: "utilities") pod "d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d" (UID: "d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:31:22 crc kubenswrapper[4967]: I1121 17:31:22.920232 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d-kube-api-access-plf5b" (OuterVolumeSpecName: "kube-api-access-plf5b") pod "d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d" (UID: "d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d"). InnerVolumeSpecName "kube-api-access-plf5b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:31:22 crc kubenswrapper[4967]: I1121 17:31:22.962211 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d" (UID: "d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:31:23 crc kubenswrapper[4967]: I1121 17:31:23.016202 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-plf5b\" (UniqueName: \"kubernetes.io/projected/d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d-kube-api-access-plf5b\") on node \"crc\" DevicePath \"\"" Nov 21 17:31:23 crc kubenswrapper[4967]: I1121 17:31:23.016244 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 17:31:23 crc kubenswrapper[4967]: I1121 17:31:23.016253 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 17:31:23 crc kubenswrapper[4967]: I1121 17:31:23.279084 4967 generic.go:334] "Generic (PLEG): container finished" podID="d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d" containerID="71d762dc7f363ea6c50e93934d0fbcefd6fab9f84041fb90b1ecac98ec0027a3" exitCode=0 Nov 21 17:31:23 crc kubenswrapper[4967]: I1121 17:31:23.279146 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrw7c" event={"ID":"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d","Type":"ContainerDied","Data":"71d762dc7f363ea6c50e93934d0fbcefd6fab9f84041fb90b1ecac98ec0027a3"} Nov 21 17:31:23 crc kubenswrapper[4967]: I1121 17:31:23.279202 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vrw7c" event={"ID":"d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d","Type":"ContainerDied","Data":"adf3d66df1619991ccb9e7e19fa7d11d0db515efba1ec6b79d041d43c9761c23"} Nov 21 17:31:23 crc kubenswrapper[4967]: I1121 17:31:23.279221 4967 scope.go:117] "RemoveContainer" containerID="71d762dc7f363ea6c50e93934d0fbcefd6fab9f84041fb90b1ecac98ec0027a3" Nov 21 17:31:23 crc kubenswrapper[4967]: I1121 17:31:23.279219 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vrw7c" Nov 21 17:31:23 crc kubenswrapper[4967]: I1121 17:31:23.306803 4967 scope.go:117] "RemoveContainer" containerID="dfdd83326cf95b21d28b0cee0f3e2267ef4d94ec8ed753e973f03cc3ded66972" Nov 21 17:31:23 crc kubenswrapper[4967]: I1121 17:31:23.335520 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-vrw7c"] Nov 21 17:31:23 crc kubenswrapper[4967]: I1121 17:31:23.345096 4967 scope.go:117] "RemoveContainer" containerID="2f79bfbc68e2353c805c20bb0c209cc60c06814733d2ea21548eb8550305122a" Nov 21 17:31:23 crc kubenswrapper[4967]: I1121 17:31:23.345931 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-vrw7c"] Nov 21 17:31:23 crc kubenswrapper[4967]: I1121 17:31:23.419587 4967 scope.go:117] "RemoveContainer" containerID="71d762dc7f363ea6c50e93934d0fbcefd6fab9f84041fb90b1ecac98ec0027a3" Nov 21 17:31:23 crc kubenswrapper[4967]: E1121 17:31:23.420765 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71d762dc7f363ea6c50e93934d0fbcefd6fab9f84041fb90b1ecac98ec0027a3\": container with ID starting with 71d762dc7f363ea6c50e93934d0fbcefd6fab9f84041fb90b1ecac98ec0027a3 not found: ID does not exist" containerID="71d762dc7f363ea6c50e93934d0fbcefd6fab9f84041fb90b1ecac98ec0027a3" Nov 21 17:31:23 crc kubenswrapper[4967]: I1121 17:31:23.420849 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71d762dc7f363ea6c50e93934d0fbcefd6fab9f84041fb90b1ecac98ec0027a3"} err="failed to get container status \"71d762dc7f363ea6c50e93934d0fbcefd6fab9f84041fb90b1ecac98ec0027a3\": rpc error: code = NotFound desc = could not find container \"71d762dc7f363ea6c50e93934d0fbcefd6fab9f84041fb90b1ecac98ec0027a3\": container with ID starting with 71d762dc7f363ea6c50e93934d0fbcefd6fab9f84041fb90b1ecac98ec0027a3 not found: ID does not exist" Nov 21 17:31:23 crc kubenswrapper[4967]: I1121 17:31:23.420894 4967 scope.go:117] "RemoveContainer" containerID="dfdd83326cf95b21d28b0cee0f3e2267ef4d94ec8ed753e973f03cc3ded66972" Nov 21 17:31:23 crc kubenswrapper[4967]: E1121 17:31:23.421483 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dfdd83326cf95b21d28b0cee0f3e2267ef4d94ec8ed753e973f03cc3ded66972\": container with ID starting with dfdd83326cf95b21d28b0cee0f3e2267ef4d94ec8ed753e973f03cc3ded66972 not found: ID does not exist" containerID="dfdd83326cf95b21d28b0cee0f3e2267ef4d94ec8ed753e973f03cc3ded66972" Nov 21 17:31:23 crc kubenswrapper[4967]: I1121 17:31:23.421543 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dfdd83326cf95b21d28b0cee0f3e2267ef4d94ec8ed753e973f03cc3ded66972"} err="failed to get container status \"dfdd83326cf95b21d28b0cee0f3e2267ef4d94ec8ed753e973f03cc3ded66972\": rpc error: code = NotFound desc = could not find container \"dfdd83326cf95b21d28b0cee0f3e2267ef4d94ec8ed753e973f03cc3ded66972\": container with ID starting with dfdd83326cf95b21d28b0cee0f3e2267ef4d94ec8ed753e973f03cc3ded66972 not found: ID does not exist" Nov 21 17:31:23 crc kubenswrapper[4967]: I1121 17:31:23.421575 4967 scope.go:117] "RemoveContainer" containerID="2f79bfbc68e2353c805c20bb0c209cc60c06814733d2ea21548eb8550305122a" Nov 21 17:31:23 crc kubenswrapper[4967]: E1121 17:31:23.422030 4967 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"2f79bfbc68e2353c805c20bb0c209cc60c06814733d2ea21548eb8550305122a\": container with ID starting with 2f79bfbc68e2353c805c20bb0c209cc60c06814733d2ea21548eb8550305122a not found: ID does not exist" containerID="2f79bfbc68e2353c805c20bb0c209cc60c06814733d2ea21548eb8550305122a" Nov 21 17:31:23 crc kubenswrapper[4967]: I1121 17:31:23.422117 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f79bfbc68e2353c805c20bb0c209cc60c06814733d2ea21548eb8550305122a"} err="failed to get container status \"2f79bfbc68e2353c805c20bb0c209cc60c06814733d2ea21548eb8550305122a\": rpc error: code = NotFound desc = could not find container \"2f79bfbc68e2353c805c20bb0c209cc60c06814733d2ea21548eb8550305122a\": container with ID starting with 2f79bfbc68e2353c805c20bb0c209cc60c06814733d2ea21548eb8550305122a not found: ID does not exist" Nov 21 17:31:24 crc kubenswrapper[4967]: I1121 17:31:24.550181 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d" path="/var/lib/kubelet/pods/d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d/volumes" Nov 21 17:31:46 crc kubenswrapper[4967]: I1121 17:31:46.522430 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:31:46 crc kubenswrapper[4967]: I1121 17:31:46.523479 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:31:46 crc kubenswrapper[4967]: I1121 17:31:46.523580 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 17:31:46 crc kubenswrapper[4967]: I1121 17:31:46.525261 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"98d7074e2f611728e5a01ed6ef34ab49d8ea79c2581f7fcc491b941813737527"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 17:31:46 crc kubenswrapper[4967]: I1121 17:31:46.525462 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://98d7074e2f611728e5a01ed6ef34ab49d8ea79c2581f7fcc491b941813737527" gracePeriod=600 Nov 21 17:31:47 crc kubenswrapper[4967]: I1121 17:31:47.592541 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="98d7074e2f611728e5a01ed6ef34ab49d8ea79c2581f7fcc491b941813737527" exitCode=0 Nov 21 17:31:47 crc kubenswrapper[4967]: I1121 17:31:47.592676 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" 
event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"98d7074e2f611728e5a01ed6ef34ab49d8ea79c2581f7fcc491b941813737527"} Nov 21 17:31:47 crc kubenswrapper[4967]: I1121 17:31:47.593331 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"} Nov 21 17:31:47 crc kubenswrapper[4967]: I1121 17:31:47.593372 4967 scope.go:117] "RemoveContainer" containerID="57a63116e1b7199a677d076261140491145ec880d766848073f176fcb09d953b" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.111705 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8gntf"] Nov 21 17:32:27 crc kubenswrapper[4967]: E1121 17:32:27.114390 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d" containerName="extract-utilities" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.114530 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d" containerName="extract-utilities" Nov 21 17:32:27 crc kubenswrapper[4967]: E1121 17:32:27.114814 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d88dc4f-557e-4eb9-913c-f2eaa631f8a6" containerName="extract-content" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.114896 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d88dc4f-557e-4eb9-913c-f2eaa631f8a6" containerName="extract-content" Nov 21 17:32:27 crc kubenswrapper[4967]: E1121 17:32:27.114986 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d" containerName="extract-content" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.115072 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d" containerName="extract-content" Nov 21 17:32:27 crc kubenswrapper[4967]: E1121 17:32:27.115187 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d88dc4f-557e-4eb9-913c-f2eaa631f8a6" containerName="registry-server" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.115291 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d88dc4f-557e-4eb9-913c-f2eaa631f8a6" containerName="registry-server" Nov 21 17:32:27 crc kubenswrapper[4967]: E1121 17:32:27.115409 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d" containerName="registry-server" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.115494 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d" containerName="registry-server" Nov 21 17:32:27 crc kubenswrapper[4967]: E1121 17:32:27.115620 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d88dc4f-557e-4eb9-913c-f2eaa631f8a6" containerName="extract-utilities" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.115721 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d88dc4f-557e-4eb9-913c-f2eaa631f8a6" containerName="extract-utilities" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.116371 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5db5581-dfe7-4dbe-8b82-dd0f1dfead5d" containerName="registry-server" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.116560 4967 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="4d88dc4f-557e-4eb9-913c-f2eaa631f8a6" containerName="registry-server" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.121711 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8gntf" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.139979 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8gntf"] Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.199748 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38892323-fcc2-4b58-823a-6247ab6b6434-utilities\") pod \"redhat-operators-8gntf\" (UID: \"38892323-fcc2-4b58-823a-6247ab6b6434\") " pod="openshift-marketplace/redhat-operators-8gntf" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.199842 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pndqz\" (UniqueName: \"kubernetes.io/projected/38892323-fcc2-4b58-823a-6247ab6b6434-kube-api-access-pndqz\") pod \"redhat-operators-8gntf\" (UID: \"38892323-fcc2-4b58-823a-6247ab6b6434\") " pod="openshift-marketplace/redhat-operators-8gntf" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.199911 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38892323-fcc2-4b58-823a-6247ab6b6434-catalog-content\") pod \"redhat-operators-8gntf\" (UID: \"38892323-fcc2-4b58-823a-6247ab6b6434\") " pod="openshift-marketplace/redhat-operators-8gntf" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.302165 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pndqz\" (UniqueName: \"kubernetes.io/projected/38892323-fcc2-4b58-823a-6247ab6b6434-kube-api-access-pndqz\") pod \"redhat-operators-8gntf\" (UID: \"38892323-fcc2-4b58-823a-6247ab6b6434\") " pod="openshift-marketplace/redhat-operators-8gntf" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.302527 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38892323-fcc2-4b58-823a-6247ab6b6434-catalog-content\") pod \"redhat-operators-8gntf\" (UID: \"38892323-fcc2-4b58-823a-6247ab6b6434\") " pod="openshift-marketplace/redhat-operators-8gntf" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.302805 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38892323-fcc2-4b58-823a-6247ab6b6434-utilities\") pod \"redhat-operators-8gntf\" (UID: \"38892323-fcc2-4b58-823a-6247ab6b6434\") " pod="openshift-marketplace/redhat-operators-8gntf" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.303290 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38892323-fcc2-4b58-823a-6247ab6b6434-catalog-content\") pod \"redhat-operators-8gntf\" (UID: \"38892323-fcc2-4b58-823a-6247ab6b6434\") " pod="openshift-marketplace/redhat-operators-8gntf" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.303421 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38892323-fcc2-4b58-823a-6247ab6b6434-utilities\") pod \"redhat-operators-8gntf\" (UID: \"38892323-fcc2-4b58-823a-6247ab6b6434\") " 
pod="openshift-marketplace/redhat-operators-8gntf" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.324261 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pndqz\" (UniqueName: \"kubernetes.io/projected/38892323-fcc2-4b58-823a-6247ab6b6434-kube-api-access-pndqz\") pod \"redhat-operators-8gntf\" (UID: \"38892323-fcc2-4b58-823a-6247ab6b6434\") " pod="openshift-marketplace/redhat-operators-8gntf" Nov 21 17:32:27 crc kubenswrapper[4967]: I1121 17:32:27.457289 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8gntf" Nov 21 17:32:28 crc kubenswrapper[4967]: I1121 17:32:28.040518 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8gntf"] Nov 21 17:32:28 crc kubenswrapper[4967]: I1121 17:32:28.187557 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8gntf" event={"ID":"38892323-fcc2-4b58-823a-6247ab6b6434","Type":"ContainerStarted","Data":"ec63dbf2e2a2e212de6d421141ce3ff375346a053526b8508a45cfdf090cf9e5"} Nov 21 17:32:29 crc kubenswrapper[4967]: I1121 17:32:29.204471 4967 generic.go:334] "Generic (PLEG): container finished" podID="38892323-fcc2-4b58-823a-6247ab6b6434" containerID="46a20dfd5aaf11d143cf44e9fcf572a6b2a602ed7fce0bb6e707f26ae69697a6" exitCode=0 Nov 21 17:32:29 crc kubenswrapper[4967]: I1121 17:32:29.204774 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8gntf" event={"ID":"38892323-fcc2-4b58-823a-6247ab6b6434","Type":"ContainerDied","Data":"46a20dfd5aaf11d143cf44e9fcf572a6b2a602ed7fce0bb6e707f26ae69697a6"} Nov 21 17:32:31 crc kubenswrapper[4967]: I1121 17:32:31.252396 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8gntf" event={"ID":"38892323-fcc2-4b58-823a-6247ab6b6434","Type":"ContainerStarted","Data":"a3422bd88360519369324b2c3e7bdba839193eb5089729298ce2e62cc1260fee"} Nov 21 17:32:34 crc kubenswrapper[4967]: I1121 17:32:34.318674 4967 generic.go:334] "Generic (PLEG): container finished" podID="38892323-fcc2-4b58-823a-6247ab6b6434" containerID="a3422bd88360519369324b2c3e7bdba839193eb5089729298ce2e62cc1260fee" exitCode=0 Nov 21 17:32:34 crc kubenswrapper[4967]: I1121 17:32:34.318735 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8gntf" event={"ID":"38892323-fcc2-4b58-823a-6247ab6b6434","Type":"ContainerDied","Data":"a3422bd88360519369324b2c3e7bdba839193eb5089729298ce2e62cc1260fee"} Nov 21 17:32:35 crc kubenswrapper[4967]: I1121 17:32:35.355647 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8gntf" event={"ID":"38892323-fcc2-4b58-823a-6247ab6b6434","Type":"ContainerStarted","Data":"0089f2f4a8f0fac34a30a1e68956fc51ac0931d100ca227d6bbbbb641ac25129"} Nov 21 17:32:35 crc kubenswrapper[4967]: I1121 17:32:35.398194 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8gntf" podStartSLOduration=2.706715847 podStartE2EDuration="8.398153033s" podCreationTimestamp="2025-11-21 17:32:27 +0000 UTC" firstStartedPulling="2025-11-21 17:32:29.208227009 +0000 UTC m=+7037.466748017" lastFinishedPulling="2025-11-21 17:32:34.899664195 +0000 UTC m=+7043.158185203" observedRunningTime="2025-11-21 17:32:35.380389254 +0000 UTC m=+7043.638910282" watchObservedRunningTime="2025-11-21 17:32:35.398153033 +0000 UTC m=+7043.656674061" Nov 
21 17:32:37 crc kubenswrapper[4967]: I1121 17:32:37.457829 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8gntf" Nov 21 17:32:37 crc kubenswrapper[4967]: I1121 17:32:37.461184 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8gntf" Nov 21 17:32:38 crc kubenswrapper[4967]: I1121 17:32:38.539332 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8gntf" podUID="38892323-fcc2-4b58-823a-6247ab6b6434" containerName="registry-server" probeResult="failure" output=< Nov 21 17:32:38 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 17:32:38 crc kubenswrapper[4967]: > Nov 21 17:32:47 crc kubenswrapper[4967]: I1121 17:32:47.538592 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8gntf" Nov 21 17:32:47 crc kubenswrapper[4967]: I1121 17:32:47.612813 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8gntf" Nov 21 17:32:48 crc kubenswrapper[4967]: I1121 17:32:48.180282 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vpb4k"] Nov 21 17:32:48 crc kubenswrapper[4967]: I1121 17:32:48.194715 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vpb4k" Nov 21 17:32:48 crc kubenswrapper[4967]: I1121 17:32:48.199552 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vpb4k"] Nov 21 17:32:48 crc kubenswrapper[4967]: I1121 17:32:48.262514 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86df0971-a593-4fc4-9400-2e877ed55211-utilities\") pod \"community-operators-vpb4k\" (UID: \"86df0971-a593-4fc4-9400-2e877ed55211\") " pod="openshift-marketplace/community-operators-vpb4k" Nov 21 17:32:48 crc kubenswrapper[4967]: I1121 17:32:48.263500 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86df0971-a593-4fc4-9400-2e877ed55211-catalog-content\") pod \"community-operators-vpb4k\" (UID: \"86df0971-a593-4fc4-9400-2e877ed55211\") " pod="openshift-marketplace/community-operators-vpb4k" Nov 21 17:32:48 crc kubenswrapper[4967]: I1121 17:32:48.263652 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcpnn\" (UniqueName: \"kubernetes.io/projected/86df0971-a593-4fc4-9400-2e877ed55211-kube-api-access-jcpnn\") pod \"community-operators-vpb4k\" (UID: \"86df0971-a593-4fc4-9400-2e877ed55211\") " pod="openshift-marketplace/community-operators-vpb4k" Nov 21 17:32:48 crc kubenswrapper[4967]: I1121 17:32:48.365869 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86df0971-a593-4fc4-9400-2e877ed55211-utilities\") pod \"community-operators-vpb4k\" (UID: \"86df0971-a593-4fc4-9400-2e877ed55211\") " pod="openshift-marketplace/community-operators-vpb4k" Nov 21 17:32:48 crc kubenswrapper[4967]: I1121 17:32:48.365944 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/86df0971-a593-4fc4-9400-2e877ed55211-catalog-content\") pod \"community-operators-vpb4k\" (UID: \"86df0971-a593-4fc4-9400-2e877ed55211\") " pod="openshift-marketplace/community-operators-vpb4k" Nov 21 17:32:48 crc kubenswrapper[4967]: I1121 17:32:48.365978 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcpnn\" (UniqueName: \"kubernetes.io/projected/86df0971-a593-4fc4-9400-2e877ed55211-kube-api-access-jcpnn\") pod \"community-operators-vpb4k\" (UID: \"86df0971-a593-4fc4-9400-2e877ed55211\") " pod="openshift-marketplace/community-operators-vpb4k" Nov 21 17:32:48 crc kubenswrapper[4967]: I1121 17:32:48.366890 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86df0971-a593-4fc4-9400-2e877ed55211-utilities\") pod \"community-operators-vpb4k\" (UID: \"86df0971-a593-4fc4-9400-2e877ed55211\") " pod="openshift-marketplace/community-operators-vpb4k" Nov 21 17:32:48 crc kubenswrapper[4967]: I1121 17:32:48.367295 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86df0971-a593-4fc4-9400-2e877ed55211-catalog-content\") pod \"community-operators-vpb4k\" (UID: \"86df0971-a593-4fc4-9400-2e877ed55211\") " pod="openshift-marketplace/community-operators-vpb4k" Nov 21 17:32:48 crc kubenswrapper[4967]: I1121 17:32:48.392535 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcpnn\" (UniqueName: \"kubernetes.io/projected/86df0971-a593-4fc4-9400-2e877ed55211-kube-api-access-jcpnn\") pod \"community-operators-vpb4k\" (UID: \"86df0971-a593-4fc4-9400-2e877ed55211\") " pod="openshift-marketplace/community-operators-vpb4k" Nov 21 17:32:48 crc kubenswrapper[4967]: I1121 17:32:48.532480 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vpb4k" Nov 21 17:32:49 crc kubenswrapper[4967]: I1121 17:32:49.092271 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vpb4k"] Nov 21 17:32:49 crc kubenswrapper[4967]: W1121 17:32:49.094746 4967 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod86df0971_a593_4fc4_9400_2e877ed55211.slice/crio-18c876e83f16e64a89faf23c88111a8a230c4e8b03bae0e7264ebf680a9ba312 WatchSource:0}: Error finding container 18c876e83f16e64a89faf23c88111a8a230c4e8b03bae0e7264ebf680a9ba312: Status 404 returned error can't find the container with id 18c876e83f16e64a89faf23c88111a8a230c4e8b03bae0e7264ebf680a9ba312 Nov 21 17:32:49 crc kubenswrapper[4967]: I1121 17:32:49.616487 4967 generic.go:334] "Generic (PLEG): container finished" podID="86df0971-a593-4fc4-9400-2e877ed55211" containerID="ecb5f61354f7a12612daea734f5f1747f6fe4aa3b618b89a1d4c0583034e1651" exitCode=0 Nov 21 17:32:49 crc kubenswrapper[4967]: I1121 17:32:49.616889 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vpb4k" event={"ID":"86df0971-a593-4fc4-9400-2e877ed55211","Type":"ContainerDied","Data":"ecb5f61354f7a12612daea734f5f1747f6fe4aa3b618b89a1d4c0583034e1651"} Nov 21 17:32:49 crc kubenswrapper[4967]: I1121 17:32:49.616997 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vpb4k" event={"ID":"86df0971-a593-4fc4-9400-2e877ed55211","Type":"ContainerStarted","Data":"18c876e83f16e64a89faf23c88111a8a230c4e8b03bae0e7264ebf680a9ba312"} Nov 21 17:32:49 crc kubenswrapper[4967]: I1121 17:32:49.946570 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8gntf"] Nov 21 17:32:49 crc kubenswrapper[4967]: I1121 17:32:49.948042 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8gntf" podUID="38892323-fcc2-4b58-823a-6247ab6b6434" containerName="registry-server" containerID="cri-o://0089f2f4a8f0fac34a30a1e68956fc51ac0931d100ca227d6bbbbb641ac25129" gracePeriod=2 Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.466939 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8gntf" Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.519273 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pndqz\" (UniqueName: \"kubernetes.io/projected/38892323-fcc2-4b58-823a-6247ab6b6434-kube-api-access-pndqz\") pod \"38892323-fcc2-4b58-823a-6247ab6b6434\" (UID: \"38892323-fcc2-4b58-823a-6247ab6b6434\") " Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.519420 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38892323-fcc2-4b58-823a-6247ab6b6434-utilities\") pod \"38892323-fcc2-4b58-823a-6247ab6b6434\" (UID: \"38892323-fcc2-4b58-823a-6247ab6b6434\") " Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.519457 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38892323-fcc2-4b58-823a-6247ab6b6434-catalog-content\") pod \"38892323-fcc2-4b58-823a-6247ab6b6434\" (UID: \"38892323-fcc2-4b58-823a-6247ab6b6434\") " Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.524012 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38892323-fcc2-4b58-823a-6247ab6b6434-utilities" (OuterVolumeSpecName: "utilities") pod "38892323-fcc2-4b58-823a-6247ab6b6434" (UID: "38892323-fcc2-4b58-823a-6247ab6b6434"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.532803 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38892323-fcc2-4b58-823a-6247ab6b6434-kube-api-access-pndqz" (OuterVolumeSpecName: "kube-api-access-pndqz") pod "38892323-fcc2-4b58-823a-6247ab6b6434" (UID: "38892323-fcc2-4b58-823a-6247ab6b6434"). InnerVolumeSpecName "kube-api-access-pndqz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.600504 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38892323-fcc2-4b58-823a-6247ab6b6434-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "38892323-fcc2-4b58-823a-6247ab6b6434" (UID: "38892323-fcc2-4b58-823a-6247ab6b6434"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.623865 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pndqz\" (UniqueName: \"kubernetes.io/projected/38892323-fcc2-4b58-823a-6247ab6b6434-kube-api-access-pndqz\") on node \"crc\" DevicePath \"\"" Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.623897 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/38892323-fcc2-4b58-823a-6247ab6b6434-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.623908 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/38892323-fcc2-4b58-823a-6247ab6b6434-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.633377 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vpb4k" event={"ID":"86df0971-a593-4fc4-9400-2e877ed55211","Type":"ContainerStarted","Data":"3d029b2d84fbde79e1d76599e7478a64b3decc46e3f1f071c4a164a6120c7553"} Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.637165 4967 generic.go:334] "Generic (PLEG): container finished" podID="38892323-fcc2-4b58-823a-6247ab6b6434" containerID="0089f2f4a8f0fac34a30a1e68956fc51ac0931d100ca227d6bbbbb641ac25129" exitCode=0 Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.637216 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8gntf" event={"ID":"38892323-fcc2-4b58-823a-6247ab6b6434","Type":"ContainerDied","Data":"0089f2f4a8f0fac34a30a1e68956fc51ac0931d100ca227d6bbbbb641ac25129"} Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.637228 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8gntf" Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.637274 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8gntf" event={"ID":"38892323-fcc2-4b58-823a-6247ab6b6434","Type":"ContainerDied","Data":"ec63dbf2e2a2e212de6d421141ce3ff375346a053526b8508a45cfdf090cf9e5"} Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.637321 4967 scope.go:117] "RemoveContainer" containerID="0089f2f4a8f0fac34a30a1e68956fc51ac0931d100ca227d6bbbbb641ac25129" Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.677434 4967 scope.go:117] "RemoveContainer" containerID="a3422bd88360519369324b2c3e7bdba839193eb5089729298ce2e62cc1260fee" Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.684307 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8gntf"] Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.698428 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8gntf"] Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.710500 4967 scope.go:117] "RemoveContainer" containerID="46a20dfd5aaf11d143cf44e9fcf572a6b2a602ed7fce0bb6e707f26ae69697a6" Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.767136 4967 scope.go:117] "RemoveContainer" containerID="0089f2f4a8f0fac34a30a1e68956fc51ac0931d100ca227d6bbbbb641ac25129" Nov 21 17:32:50 crc kubenswrapper[4967]: E1121 17:32:50.767908 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0089f2f4a8f0fac34a30a1e68956fc51ac0931d100ca227d6bbbbb641ac25129\": container with ID starting with 0089f2f4a8f0fac34a30a1e68956fc51ac0931d100ca227d6bbbbb641ac25129 not found: ID does not exist" containerID="0089f2f4a8f0fac34a30a1e68956fc51ac0931d100ca227d6bbbbb641ac25129" Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.767953 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0089f2f4a8f0fac34a30a1e68956fc51ac0931d100ca227d6bbbbb641ac25129"} err="failed to get container status \"0089f2f4a8f0fac34a30a1e68956fc51ac0931d100ca227d6bbbbb641ac25129\": rpc error: code = NotFound desc = could not find container \"0089f2f4a8f0fac34a30a1e68956fc51ac0931d100ca227d6bbbbb641ac25129\": container with ID starting with 0089f2f4a8f0fac34a30a1e68956fc51ac0931d100ca227d6bbbbb641ac25129 not found: ID does not exist" Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.767991 4967 scope.go:117] "RemoveContainer" containerID="a3422bd88360519369324b2c3e7bdba839193eb5089729298ce2e62cc1260fee" Nov 21 17:32:50 crc kubenswrapper[4967]: E1121 17:32:50.769137 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3422bd88360519369324b2c3e7bdba839193eb5089729298ce2e62cc1260fee\": container with ID starting with a3422bd88360519369324b2c3e7bdba839193eb5089729298ce2e62cc1260fee not found: ID does not exist" containerID="a3422bd88360519369324b2c3e7bdba839193eb5089729298ce2e62cc1260fee" Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.769209 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3422bd88360519369324b2c3e7bdba839193eb5089729298ce2e62cc1260fee"} err="failed to get container status \"a3422bd88360519369324b2c3e7bdba839193eb5089729298ce2e62cc1260fee\": rpc error: code = NotFound desc = could not find container 
\"a3422bd88360519369324b2c3e7bdba839193eb5089729298ce2e62cc1260fee\": container with ID starting with a3422bd88360519369324b2c3e7bdba839193eb5089729298ce2e62cc1260fee not found: ID does not exist" Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.769256 4967 scope.go:117] "RemoveContainer" containerID="46a20dfd5aaf11d143cf44e9fcf572a6b2a602ed7fce0bb6e707f26ae69697a6" Nov 21 17:32:50 crc kubenswrapper[4967]: E1121 17:32:50.769896 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46a20dfd5aaf11d143cf44e9fcf572a6b2a602ed7fce0bb6e707f26ae69697a6\": container with ID starting with 46a20dfd5aaf11d143cf44e9fcf572a6b2a602ed7fce0bb6e707f26ae69697a6 not found: ID does not exist" containerID="46a20dfd5aaf11d143cf44e9fcf572a6b2a602ed7fce0bb6e707f26ae69697a6" Nov 21 17:32:50 crc kubenswrapper[4967]: I1121 17:32:50.769922 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46a20dfd5aaf11d143cf44e9fcf572a6b2a602ed7fce0bb6e707f26ae69697a6"} err="failed to get container status \"46a20dfd5aaf11d143cf44e9fcf572a6b2a602ed7fce0bb6e707f26ae69697a6\": rpc error: code = NotFound desc = could not find container \"46a20dfd5aaf11d143cf44e9fcf572a6b2a602ed7fce0bb6e707f26ae69697a6\": container with ID starting with 46a20dfd5aaf11d143cf44e9fcf572a6b2a602ed7fce0bb6e707f26ae69697a6 not found: ID does not exist" Nov 21 17:32:52 crc kubenswrapper[4967]: I1121 17:32:52.563055 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38892323-fcc2-4b58-823a-6247ab6b6434" path="/var/lib/kubelet/pods/38892323-fcc2-4b58-823a-6247ab6b6434/volumes" Nov 21 17:32:52 crc kubenswrapper[4967]: I1121 17:32:52.664560 4967 generic.go:334] "Generic (PLEG): container finished" podID="86df0971-a593-4fc4-9400-2e877ed55211" containerID="3d029b2d84fbde79e1d76599e7478a64b3decc46e3f1f071c4a164a6120c7553" exitCode=0 Nov 21 17:32:52 crc kubenswrapper[4967]: I1121 17:32:52.664625 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vpb4k" event={"ID":"86df0971-a593-4fc4-9400-2e877ed55211","Type":"ContainerDied","Data":"3d029b2d84fbde79e1d76599e7478a64b3decc46e3f1f071c4a164a6120c7553"} Nov 21 17:32:53 crc kubenswrapper[4967]: I1121 17:32:53.679173 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vpb4k" event={"ID":"86df0971-a593-4fc4-9400-2e877ed55211","Type":"ContainerStarted","Data":"28729c3f096ce71ba27d43c27e9c0e5bc9a118707e98ac722e061d8a0f6fe370"} Nov 21 17:32:53 crc kubenswrapper[4967]: I1121 17:32:53.716888 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vpb4k" podStartSLOduration=2.166064383 podStartE2EDuration="5.716845566s" podCreationTimestamp="2025-11-21 17:32:48 +0000 UTC" firstStartedPulling="2025-11-21 17:32:49.618938322 +0000 UTC m=+7057.877459330" lastFinishedPulling="2025-11-21 17:32:53.169719505 +0000 UTC m=+7061.428240513" observedRunningTime="2025-11-21 17:32:53.698266984 +0000 UTC m=+7061.956788032" watchObservedRunningTime="2025-11-21 17:32:53.716845566 +0000 UTC m=+7061.975366614" Nov 21 17:32:58 crc kubenswrapper[4967]: I1121 17:32:58.533213 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vpb4k" Nov 21 17:32:58 crc kubenswrapper[4967]: I1121 17:32:58.534256 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/community-operators-vpb4k" Nov 21 17:32:58 crc kubenswrapper[4967]: I1121 17:32:58.607905 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vpb4k" Nov 21 17:32:58 crc kubenswrapper[4967]: I1121 17:32:58.836709 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vpb4k" Nov 21 17:32:58 crc kubenswrapper[4967]: I1121 17:32:58.921021 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vpb4k"] Nov 21 17:33:00 crc kubenswrapper[4967]: I1121 17:33:00.777048 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vpb4k" podUID="86df0971-a593-4fc4-9400-2e877ed55211" containerName="registry-server" containerID="cri-o://28729c3f096ce71ba27d43c27e9c0e5bc9a118707e98ac722e061d8a0f6fe370" gracePeriod=2 Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.343512 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vpb4k" Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.460367 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jcpnn\" (UniqueName: \"kubernetes.io/projected/86df0971-a593-4fc4-9400-2e877ed55211-kube-api-access-jcpnn\") pod \"86df0971-a593-4fc4-9400-2e877ed55211\" (UID: \"86df0971-a593-4fc4-9400-2e877ed55211\") " Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.460553 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86df0971-a593-4fc4-9400-2e877ed55211-utilities\") pod \"86df0971-a593-4fc4-9400-2e877ed55211\" (UID: \"86df0971-a593-4fc4-9400-2e877ed55211\") " Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.460803 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86df0971-a593-4fc4-9400-2e877ed55211-catalog-content\") pod \"86df0971-a593-4fc4-9400-2e877ed55211\" (UID: \"86df0971-a593-4fc4-9400-2e877ed55211\") " Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.461849 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/86df0971-a593-4fc4-9400-2e877ed55211-utilities" (OuterVolumeSpecName: "utilities") pod "86df0971-a593-4fc4-9400-2e877ed55211" (UID: "86df0971-a593-4fc4-9400-2e877ed55211"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.472893 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86df0971-a593-4fc4-9400-2e877ed55211-kube-api-access-jcpnn" (OuterVolumeSpecName: "kube-api-access-jcpnn") pod "86df0971-a593-4fc4-9400-2e877ed55211" (UID: "86df0971-a593-4fc4-9400-2e877ed55211"). InnerVolumeSpecName "kube-api-access-jcpnn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.548077 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/86df0971-a593-4fc4-9400-2e877ed55211-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "86df0971-a593-4fc4-9400-2e877ed55211" (UID: "86df0971-a593-4fc4-9400-2e877ed55211"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.563797 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86df0971-a593-4fc4-9400-2e877ed55211-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.563828 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jcpnn\" (UniqueName: \"kubernetes.io/projected/86df0971-a593-4fc4-9400-2e877ed55211-kube-api-access-jcpnn\") on node \"crc\" DevicePath \"\"" Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.563839 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86df0971-a593-4fc4-9400-2e877ed55211-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.803852 4967 generic.go:334] "Generic (PLEG): container finished" podID="86df0971-a593-4fc4-9400-2e877ed55211" containerID="28729c3f096ce71ba27d43c27e9c0e5bc9a118707e98ac722e061d8a0f6fe370" exitCode=0 Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.803910 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vpb4k" event={"ID":"86df0971-a593-4fc4-9400-2e877ed55211","Type":"ContainerDied","Data":"28729c3f096ce71ba27d43c27e9c0e5bc9a118707e98ac722e061d8a0f6fe370"} Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.803935 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vpb4k" Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.803947 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vpb4k" event={"ID":"86df0971-a593-4fc4-9400-2e877ed55211","Type":"ContainerDied","Data":"18c876e83f16e64a89faf23c88111a8a230c4e8b03bae0e7264ebf680a9ba312"} Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.803967 4967 scope.go:117] "RemoveContainer" containerID="28729c3f096ce71ba27d43c27e9c0e5bc9a118707e98ac722e061d8a0f6fe370" Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.852513 4967 scope.go:117] "RemoveContainer" containerID="3d029b2d84fbde79e1d76599e7478a64b3decc46e3f1f071c4a164a6120c7553" Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.854413 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vpb4k"] Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.867812 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vpb4k"] Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.888189 4967 scope.go:117] "RemoveContainer" containerID="ecb5f61354f7a12612daea734f5f1747f6fe4aa3b618b89a1d4c0583034e1651" Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.958221 4967 scope.go:117] "RemoveContainer" containerID="28729c3f096ce71ba27d43c27e9c0e5bc9a118707e98ac722e061d8a0f6fe370" Nov 21 17:33:01 crc kubenswrapper[4967]: E1121 17:33:01.958966 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28729c3f096ce71ba27d43c27e9c0e5bc9a118707e98ac722e061d8a0f6fe370\": container with ID starting with 28729c3f096ce71ba27d43c27e9c0e5bc9a118707e98ac722e061d8a0f6fe370 not found: ID does not exist" containerID="28729c3f096ce71ba27d43c27e9c0e5bc9a118707e98ac722e061d8a0f6fe370" Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.959074 
4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28729c3f096ce71ba27d43c27e9c0e5bc9a118707e98ac722e061d8a0f6fe370"} err="failed to get container status \"28729c3f096ce71ba27d43c27e9c0e5bc9a118707e98ac722e061d8a0f6fe370\": rpc error: code = NotFound desc = could not find container \"28729c3f096ce71ba27d43c27e9c0e5bc9a118707e98ac722e061d8a0f6fe370\": container with ID starting with 28729c3f096ce71ba27d43c27e9c0e5bc9a118707e98ac722e061d8a0f6fe370 not found: ID does not exist" Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.959162 4967 scope.go:117] "RemoveContainer" containerID="3d029b2d84fbde79e1d76599e7478a64b3decc46e3f1f071c4a164a6120c7553" Nov 21 17:33:01 crc kubenswrapper[4967]: E1121 17:33:01.959745 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d029b2d84fbde79e1d76599e7478a64b3decc46e3f1f071c4a164a6120c7553\": container with ID starting with 3d029b2d84fbde79e1d76599e7478a64b3decc46e3f1f071c4a164a6120c7553 not found: ID does not exist" containerID="3d029b2d84fbde79e1d76599e7478a64b3decc46e3f1f071c4a164a6120c7553" Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.959846 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d029b2d84fbde79e1d76599e7478a64b3decc46e3f1f071c4a164a6120c7553"} err="failed to get container status \"3d029b2d84fbde79e1d76599e7478a64b3decc46e3f1f071c4a164a6120c7553\": rpc error: code = NotFound desc = could not find container \"3d029b2d84fbde79e1d76599e7478a64b3decc46e3f1f071c4a164a6120c7553\": container with ID starting with 3d029b2d84fbde79e1d76599e7478a64b3decc46e3f1f071c4a164a6120c7553 not found: ID does not exist" Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.959934 4967 scope.go:117] "RemoveContainer" containerID="ecb5f61354f7a12612daea734f5f1747f6fe4aa3b618b89a1d4c0583034e1651" Nov 21 17:33:01 crc kubenswrapper[4967]: E1121 17:33:01.960449 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ecb5f61354f7a12612daea734f5f1747f6fe4aa3b618b89a1d4c0583034e1651\": container with ID starting with ecb5f61354f7a12612daea734f5f1747f6fe4aa3b618b89a1d4c0583034e1651 not found: ID does not exist" containerID="ecb5f61354f7a12612daea734f5f1747f6fe4aa3b618b89a1d4c0583034e1651" Nov 21 17:33:01 crc kubenswrapper[4967]: I1121 17:33:01.960562 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecb5f61354f7a12612daea734f5f1747f6fe4aa3b618b89a1d4c0583034e1651"} err="failed to get container status \"ecb5f61354f7a12612daea734f5f1747f6fe4aa3b618b89a1d4c0583034e1651\": rpc error: code = NotFound desc = could not find container \"ecb5f61354f7a12612daea734f5f1747f6fe4aa3b618b89a1d4c0583034e1651\": container with ID starting with ecb5f61354f7a12612daea734f5f1747f6fe4aa3b618b89a1d4c0583034e1651 not found: ID does not exist" Nov 21 17:33:02 crc kubenswrapper[4967]: I1121 17:33:02.553631 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86df0971-a593-4fc4-9400-2e877ed55211" path="/var/lib/kubelet/pods/86df0971-a593-4fc4-9400-2e877ed55211/volumes" Nov 21 17:33:46 crc kubenswrapper[4967]: I1121 17:33:46.522299 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:33:46 crc kubenswrapper[4967]: I1121 17:33:46.523343 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:34:12 crc kubenswrapper[4967]: I1121 17:34:12.094489 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-klvmk"] Nov 21 17:34:12 crc kubenswrapper[4967]: I1121 17:34:12.111139 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-klvmk"] Nov 21 17:34:12 crc kubenswrapper[4967]: I1121 17:34:12.576041 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36a77740-439b-489e-ae8b-d4f675be9f14" path="/var/lib/kubelet/pods/36a77740-439b-489e-ae8b-d4f675be9f14/volumes" Nov 21 17:34:15 crc kubenswrapper[4967]: I1121 17:34:15.033871 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-qkxlx"] Nov 21 17:34:15 crc kubenswrapper[4967]: I1121 17:34:15.047963 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-qkxlx"] Nov 21 17:34:16 crc kubenswrapper[4967]: I1121 17:34:16.522477 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:34:16 crc kubenswrapper[4967]: I1121 17:34:16.522836 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:34:16 crc kubenswrapper[4967]: I1121 17:34:16.551191 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d018f81-8caf-43f2-8ce6-5a799aacde0d" path="/var/lib/kubelet/pods/6d018f81-8caf-43f2-8ce6-5a799aacde0d/volumes" Nov 21 17:34:45 crc kubenswrapper[4967]: I1121 17:34:45.920478 4967 scope.go:117] "RemoveContainer" containerID="cc31cf1fb3a858eeda7bd10eebe0fe4cc860411e5244200af94e3e6116c09bdd" Nov 21 17:34:45 crc kubenswrapper[4967]: I1121 17:34:45.957057 4967 scope.go:117] "RemoveContainer" containerID="fdf839b7cb225643cb400e343664299a4d3e13d27f1a54f375d9e9972cad29be" Nov 21 17:34:46 crc kubenswrapper[4967]: I1121 17:34:46.522504 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:34:46 crc kubenswrapper[4967]: I1121 17:34:46.523395 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:34:46 crc kubenswrapper[4967]: I1121 17:34:46.523727 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 17:34:46 crc kubenswrapper[4967]: I1121 17:34:46.525639 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 17:34:46 crc kubenswrapper[4967]: I1121 17:34:46.525938 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce" gracePeriod=600 Nov 21 17:34:46 crc kubenswrapper[4967]: E1121 17:34:46.668220 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:34:47 crc kubenswrapper[4967]: I1121 17:34:47.203207 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce" exitCode=0 Nov 21 17:34:47 crc kubenswrapper[4967]: I1121 17:34:47.203255 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"} Nov 21 17:34:47 crc kubenswrapper[4967]: I1121 17:34:47.203294 4967 scope.go:117] "RemoveContainer" containerID="98d7074e2f611728e5a01ed6ef34ab49d8ea79c2581f7fcc491b941813737527" Nov 21 17:34:47 crc kubenswrapper[4967]: I1121 17:34:47.204360 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce" Nov 21 17:34:47 crc kubenswrapper[4967]: E1121 17:34:47.204864 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:35:00 crc kubenswrapper[4967]: I1121 17:35:00.536895 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce" Nov 21 17:35:00 crc kubenswrapper[4967]: E1121 17:35:00.538081 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:35:11 crc 
kubenswrapper[4967]: I1121 17:35:11.536908 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce" Nov 21 17:35:11 crc kubenswrapper[4967]: E1121 17:35:11.537866 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:35:22 crc kubenswrapper[4967]: I1121 17:35:22.546834 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce" Nov 21 17:35:22 crc kubenswrapper[4967]: E1121 17:35:22.547717 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:35:34 crc kubenswrapper[4967]: I1121 17:35:34.536455 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce" Nov 21 17:35:34 crc kubenswrapper[4967]: E1121 17:35:34.537819 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:35:45 crc kubenswrapper[4967]: I1121 17:35:45.537684 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce" Nov 21 17:35:45 crc kubenswrapper[4967]: E1121 17:35:45.538954 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:35:58 crc kubenswrapper[4967]: I1121 17:35:58.536653 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce" Nov 21 17:35:58 crc kubenswrapper[4967]: E1121 17:35:58.537889 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:36:13 crc kubenswrapper[4967]: I1121 17:36:13.537903 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce" Nov 21 17:36:13 crc 
kubenswrapper[4967]: E1121 17:36:13.541751 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:36:24 crc kubenswrapper[4967]: I1121 17:36:24.538461 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"
Nov 21 17:36:24 crc kubenswrapper[4967]: E1121 17:36:24.539799 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:36:38 crc kubenswrapper[4967]: I1121 17:36:38.538348 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"
Nov 21 17:36:38 crc kubenswrapper[4967]: E1121 17:36:38.539494 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:36:51 crc kubenswrapper[4967]: I1121 17:36:51.536693 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"
Nov 21 17:36:51 crc kubenswrapper[4967]: E1121 17:36:51.538247 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:37:05 crc kubenswrapper[4967]: I1121 17:37:05.536609 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"
Nov 21 17:37:05 crc kubenswrapper[4967]: E1121 17:37:05.538365 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:37:17 crc kubenswrapper[4967]: I1121 17:37:17.537426 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"
Nov 21 17:37:17 crc kubenswrapper[4967]: E1121 17:37:17.538811 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:37:32 crc kubenswrapper[4967]: I1121 17:37:32.548958 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"
Nov 21 17:37:32 crc kubenswrapper[4967]: E1121 17:37:32.551504 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:37:46 crc kubenswrapper[4967]: I1121 17:37:46.536758 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"
Nov 21 17:37:46 crc kubenswrapper[4967]: E1121 17:37:46.538116 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:37:59 crc kubenswrapper[4967]: I1121 17:37:59.537686 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"
Nov 21 17:37:59 crc kubenswrapper[4967]: E1121 17:37:59.538580 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:38:10 crc kubenswrapper[4967]: I1121 17:38:10.538075 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"
Nov 21 17:38:10 crc kubenswrapper[4967]: E1121 17:38:10.539061 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:38:21 crc kubenswrapper[4967]: I1121 17:38:21.537428 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"
Nov 21 17:38:21 crc kubenswrapper[4967]: E1121 17:38:21.538543 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:38:33 crc kubenswrapper[4967]: I1121 17:38:33.537014 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"
Nov 21 17:38:33 crc kubenswrapper[4967]: E1121 17:38:33.538187 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:38:46 crc kubenswrapper[4967]: I1121 17:38:46.537294 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"
Nov 21 17:38:46 crc kubenswrapper[4967]: E1121 17:38:46.538575 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:38:57 crc kubenswrapper[4967]: I1121 17:38:57.539742 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"
Nov 21 17:38:57 crc kubenswrapper[4967]: E1121 17:38:57.540733 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:39:08 crc kubenswrapper[4967]: I1121 17:39:08.543157 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"
Nov 21 17:39:08 crc kubenswrapper[4967]: E1121 17:39:08.544884 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:39:21 crc kubenswrapper[4967]: I1121 17:39:21.538175 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"
Nov 21 17:39:21 crc kubenswrapper[4967]: E1121 17:39:21.541146 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:39:32 crc kubenswrapper[4967]: I1121 17:39:32.552777 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"
Nov 21 17:39:32 crc kubenswrapper[4967]: E1121 17:39:32.554272 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:39:44 crc kubenswrapper[4967]: I1121 17:39:44.537046 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"
Nov 21 17:39:44 crc kubenswrapper[4967]: E1121 17:39:44.538629 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:39:56 crc kubenswrapper[4967]: I1121 17:39:56.538574 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"
Nov 21 17:39:57 crc kubenswrapper[4967]: I1121 17:39:57.877467 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"67a21db30f97c1a97d8586d35df714af60a429d9fe77042ad42a28e73c63ab91"}
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.494301 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8ck77"]
Nov 21 17:41:14 crc kubenswrapper[4967]: E1121 17:41:14.500070 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38892323-fcc2-4b58-823a-6247ab6b6434" containerName="extract-content"
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.500392 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="38892323-fcc2-4b58-823a-6247ab6b6434" containerName="extract-content"
Nov 21 17:41:14 crc kubenswrapper[4967]: E1121 17:41:14.500597 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38892323-fcc2-4b58-823a-6247ab6b6434" containerName="registry-server"
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.500730 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="38892323-fcc2-4b58-823a-6247ab6b6434" containerName="registry-server"
Nov 21 17:41:14 crc kubenswrapper[4967]: E1121 17:41:14.501919 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86df0971-a593-4fc4-9400-2e877ed55211" containerName="extract-content"
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.502124 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="86df0971-a593-4fc4-9400-2e877ed55211" containerName="extract-content"
Nov 21 17:41:14 crc kubenswrapper[4967]: E1121 17:41:14.502299 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86df0971-a593-4fc4-9400-2e877ed55211" containerName="registry-server"
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.502484 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="86df0971-a593-4fc4-9400-2e877ed55211" containerName="registry-server"
Nov 21 17:41:14 crc kubenswrapper[4967]: E1121 17:41:14.502636 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38892323-fcc2-4b58-823a-6247ab6b6434" containerName="extract-utilities"
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.502764 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="38892323-fcc2-4b58-823a-6247ab6b6434" containerName="extract-utilities"
Nov 21 17:41:14 crc kubenswrapper[4967]: E1121 17:41:14.502928 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86df0971-a593-4fc4-9400-2e877ed55211" containerName="extract-utilities"
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.503055 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="86df0971-a593-4fc4-9400-2e877ed55211" containerName="extract-utilities"
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.503771 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="38892323-fcc2-4b58-823a-6247ab6b6434" containerName="registry-server"
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.503987 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="86df0971-a593-4fc4-9400-2e877ed55211" containerName="registry-server"
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.507862 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8ck77"
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.512281 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8ck77"]
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.685047 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hns6d\" (UniqueName: \"kubernetes.io/projected/b0b51bf0-7366-4dde-96b2-c6c6173ff32a-kube-api-access-hns6d\") pod \"certified-operators-8ck77\" (UID: \"b0b51bf0-7366-4dde-96b2-c6c6173ff32a\") " pod="openshift-marketplace/certified-operators-8ck77"
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.685154 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0b51bf0-7366-4dde-96b2-c6c6173ff32a-utilities\") pod \"certified-operators-8ck77\" (UID: \"b0b51bf0-7366-4dde-96b2-c6c6173ff32a\") " pod="openshift-marketplace/certified-operators-8ck77"
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.685235 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0b51bf0-7366-4dde-96b2-c6c6173ff32a-catalog-content\") pod \"certified-operators-8ck77\" (UID: \"b0b51bf0-7366-4dde-96b2-c6c6173ff32a\") " pod="openshift-marketplace/certified-operators-8ck77"
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.788575 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hns6d\" (UniqueName: \"kubernetes.io/projected/b0b51bf0-7366-4dde-96b2-c6c6173ff32a-kube-api-access-hns6d\") pod \"certified-operators-8ck77\" (UID: \"b0b51bf0-7366-4dde-96b2-c6c6173ff32a\") " pod="openshift-marketplace/certified-operators-8ck77"
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.790388 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0b51bf0-7366-4dde-96b2-c6c6173ff32a-utilities\") pod \"certified-operators-8ck77\" (UID: \"b0b51bf0-7366-4dde-96b2-c6c6173ff32a\") " pod="openshift-marketplace/certified-operators-8ck77"
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.790898 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0b51bf0-7366-4dde-96b2-c6c6173ff32a-utilities\") pod \"certified-operators-8ck77\" (UID: \"b0b51bf0-7366-4dde-96b2-c6c6173ff32a\") " pod="openshift-marketplace/certified-operators-8ck77"
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.791056 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0b51bf0-7366-4dde-96b2-c6c6173ff32a-catalog-content\") pod \"certified-operators-8ck77\" (UID: \"b0b51bf0-7366-4dde-96b2-c6c6173ff32a\") " pod="openshift-marketplace/certified-operators-8ck77"
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.791331 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0b51bf0-7366-4dde-96b2-c6c6173ff32a-catalog-content\") pod \"certified-operators-8ck77\" (UID: \"b0b51bf0-7366-4dde-96b2-c6c6173ff32a\") " pod="openshift-marketplace/certified-operators-8ck77"
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.815832 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hns6d\" (UniqueName: \"kubernetes.io/projected/b0b51bf0-7366-4dde-96b2-c6c6173ff32a-kube-api-access-hns6d\") pod \"certified-operators-8ck77\" (UID: \"b0b51bf0-7366-4dde-96b2-c6c6173ff32a\") " pod="openshift-marketplace/certified-operators-8ck77"
Nov 21 17:41:14 crc kubenswrapper[4967]: I1121 17:41:14.852584 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8ck77"
Nov 21 17:41:15 crc kubenswrapper[4967]: I1121 17:41:15.400239 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8ck77"]
Nov 21 17:41:16 crc kubenswrapper[4967]: I1121 17:41:16.145735 4967 generic.go:334] "Generic (PLEG): container finished" podID="b0b51bf0-7366-4dde-96b2-c6c6173ff32a" containerID="fbfb3179ee5b72c0bdc5eb7f6227163b98a0e399c5ee6a9fcdc4d1ffa54efccf" exitCode=0
Nov 21 17:41:16 crc kubenswrapper[4967]: I1121 17:41:16.146234 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8ck77" event={"ID":"b0b51bf0-7366-4dde-96b2-c6c6173ff32a","Type":"ContainerDied","Data":"fbfb3179ee5b72c0bdc5eb7f6227163b98a0e399c5ee6a9fcdc4d1ffa54efccf"}
Nov 21 17:41:16 crc kubenswrapper[4967]: I1121 17:41:16.146440 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8ck77" event={"ID":"b0b51bf0-7366-4dde-96b2-c6c6173ff32a","Type":"ContainerStarted","Data":"04d230c0c1b62ea2d9e3799e02df3c0c4d33ff06cbb1dda893713ccb4f8ea6e0"}
Nov 21 17:41:16 crc kubenswrapper[4967]: I1121 17:41:16.148903 4967 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 21 17:41:17 crc kubenswrapper[4967]: I1121 17:41:17.163932 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8ck77" event={"ID":"b0b51bf0-7366-4dde-96b2-c6c6173ff32a","Type":"ContainerStarted","Data":"b846e519ecd99982d700f4e337c123bb064c132476ea5ae6869d74afa836bbea"}
Nov 21 17:41:19 crc kubenswrapper[4967]: I1121 17:41:19.195271 4967 generic.go:334] "Generic (PLEG): container finished" podID="b0b51bf0-7366-4dde-96b2-c6c6173ff32a" containerID="b846e519ecd99982d700f4e337c123bb064c132476ea5ae6869d74afa836bbea" exitCode=0
Nov 21 17:41:19 crc kubenswrapper[4967]: I1121 17:41:19.195388 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8ck77" event={"ID":"b0b51bf0-7366-4dde-96b2-c6c6173ff32a","Type":"ContainerDied","Data":"b846e519ecd99982d700f4e337c123bb064c132476ea5ae6869d74afa836bbea"}
Nov 21 17:41:20 crc kubenswrapper[4967]: I1121 17:41:20.215096 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8ck77" event={"ID":"b0b51bf0-7366-4dde-96b2-c6c6173ff32a","Type":"ContainerStarted","Data":"6d5288bf6b86f6eb88c6a08fcf04e7f54365ece6a027299816df12dfb9291e60"}
Nov 21 17:41:20 crc kubenswrapper[4967]: I1121 17:41:20.250454 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8ck77" podStartSLOduration=2.790477381 podStartE2EDuration="6.250418723s" podCreationTimestamp="2025-11-21 17:41:14 +0000 UTC" firstStartedPulling="2025-11-21 17:41:16.148614377 +0000 UTC m=+7564.407135385" lastFinishedPulling="2025-11-21 17:41:19.608555709 +0000 UTC m=+7567.867076727" observedRunningTime="2025-11-21 17:41:20.235174736 +0000 UTC m=+7568.493695774" watchObservedRunningTime="2025-11-21 17:41:20.250418723 +0000 UTC m=+7568.508939761"
Nov 21 17:41:24 crc kubenswrapper[4967]: I1121 17:41:24.853355 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8ck77"
Nov 21 17:41:24 crc kubenswrapper[4967]: I1121 17:41:24.854103 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8ck77"
Nov 21 17:41:25 crc kubenswrapper[4967]: I1121 17:41:25.940282 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-8ck77" podUID="b0b51bf0-7366-4dde-96b2-c6c6173ff32a" containerName="registry-server" probeResult="failure" output=<
Nov 21 17:41:25 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s
Nov 21 17:41:25 crc kubenswrapper[4967]: >
Nov 21 17:41:34 crc kubenswrapper[4967]: I1121 17:41:34.954075 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8ck77"
Nov 21 17:41:35 crc kubenswrapper[4967]: I1121 17:41:35.065829 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8ck77"
Nov 21 17:41:35 crc kubenswrapper[4967]: I1121 17:41:35.217109 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8ck77"]
Nov 21 17:41:36 crc kubenswrapper[4967]: I1121 17:41:36.542381 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8ck77" podUID="b0b51bf0-7366-4dde-96b2-c6c6173ff32a" containerName="registry-server" containerID="cri-o://6d5288bf6b86f6eb88c6a08fcf04e7f54365ece6a027299816df12dfb9291e60" gracePeriod=2
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.283736 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8ck77"
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.396071 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0b51bf0-7366-4dde-96b2-c6c6173ff32a-catalog-content\") pod \"b0b51bf0-7366-4dde-96b2-c6c6173ff32a\" (UID: \"b0b51bf0-7366-4dde-96b2-c6c6173ff32a\") "
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.396469 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0b51bf0-7366-4dde-96b2-c6c6173ff32a-utilities\") pod \"b0b51bf0-7366-4dde-96b2-c6c6173ff32a\" (UID: \"b0b51bf0-7366-4dde-96b2-c6c6173ff32a\") "
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.396713 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hns6d\" (UniqueName: \"kubernetes.io/projected/b0b51bf0-7366-4dde-96b2-c6c6173ff32a-kube-api-access-hns6d\") pod \"b0b51bf0-7366-4dde-96b2-c6c6173ff32a\" (UID: \"b0b51bf0-7366-4dde-96b2-c6c6173ff32a\") "
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.397275 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0b51bf0-7366-4dde-96b2-c6c6173ff32a-utilities" (OuterVolumeSpecName: "utilities") pod "b0b51bf0-7366-4dde-96b2-c6c6173ff32a" (UID: "b0b51bf0-7366-4dde-96b2-c6c6173ff32a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.398427 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0b51bf0-7366-4dde-96b2-c6c6173ff32a-utilities\") on node \"crc\" DevicePath \"\""
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.410859 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0b51bf0-7366-4dde-96b2-c6c6173ff32a-kube-api-access-hns6d" (OuterVolumeSpecName: "kube-api-access-hns6d") pod "b0b51bf0-7366-4dde-96b2-c6c6173ff32a" (UID: "b0b51bf0-7366-4dde-96b2-c6c6173ff32a"). InnerVolumeSpecName "kube-api-access-hns6d". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.446371 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0b51bf0-7366-4dde-96b2-c6c6173ff32a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b0b51bf0-7366-4dde-96b2-c6c6173ff32a" (UID: "b0b51bf0-7366-4dde-96b2-c6c6173ff32a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.501090 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hns6d\" (UniqueName: \"kubernetes.io/projected/b0b51bf0-7366-4dde-96b2-c6c6173ff32a-kube-api-access-hns6d\") on node \"crc\" DevicePath \"\""
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.501145 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0b51bf0-7366-4dde-96b2-c6c6173ff32a-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.558145 4967 generic.go:334] "Generic (PLEG): container finished" podID="b0b51bf0-7366-4dde-96b2-c6c6173ff32a" containerID="6d5288bf6b86f6eb88c6a08fcf04e7f54365ece6a027299816df12dfb9291e60" exitCode=0
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.558207 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8ck77" event={"ID":"b0b51bf0-7366-4dde-96b2-c6c6173ff32a","Type":"ContainerDied","Data":"6d5288bf6b86f6eb88c6a08fcf04e7f54365ece6a027299816df12dfb9291e60"}
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.558230 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8ck77"
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.558249 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8ck77" event={"ID":"b0b51bf0-7366-4dde-96b2-c6c6173ff32a","Type":"ContainerDied","Data":"04d230c0c1b62ea2d9e3799e02df3c0c4d33ff06cbb1dda893713ccb4f8ea6e0"}
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.558278 4967 scope.go:117] "RemoveContainer" containerID="6d5288bf6b86f6eb88c6a08fcf04e7f54365ece6a027299816df12dfb9291e60"
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.601818 4967 scope.go:117] "RemoveContainer" containerID="b846e519ecd99982d700f4e337c123bb064c132476ea5ae6869d74afa836bbea"
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.646267 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8ck77"]
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.649835 4967 scope.go:117] "RemoveContainer" containerID="fbfb3179ee5b72c0bdc5eb7f6227163b98a0e399c5ee6a9fcdc4d1ffa54efccf"
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.661904 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8ck77"]
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.715321 4967 scope.go:117] "RemoveContainer" containerID="6d5288bf6b86f6eb88c6a08fcf04e7f54365ece6a027299816df12dfb9291e60"
Nov 21 17:41:37 crc kubenswrapper[4967]: E1121 17:41:37.716006 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d5288bf6b86f6eb88c6a08fcf04e7f54365ece6a027299816df12dfb9291e60\": container with ID starting with 6d5288bf6b86f6eb88c6a08fcf04e7f54365ece6a027299816df12dfb9291e60 not found: ID does not exist" containerID="6d5288bf6b86f6eb88c6a08fcf04e7f54365ece6a027299816df12dfb9291e60"
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.716059 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d5288bf6b86f6eb88c6a08fcf04e7f54365ece6a027299816df12dfb9291e60"} err="failed to get container status \"6d5288bf6b86f6eb88c6a08fcf04e7f54365ece6a027299816df12dfb9291e60\": rpc error: code = NotFound desc = could not find container \"6d5288bf6b86f6eb88c6a08fcf04e7f54365ece6a027299816df12dfb9291e60\": container with ID starting with 6d5288bf6b86f6eb88c6a08fcf04e7f54365ece6a027299816df12dfb9291e60 not found: ID does not exist"
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.716119 4967 scope.go:117] "RemoveContainer" containerID="b846e519ecd99982d700f4e337c123bb064c132476ea5ae6869d74afa836bbea"
Nov 21 17:41:37 crc kubenswrapper[4967]: E1121 17:41:37.716738 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b846e519ecd99982d700f4e337c123bb064c132476ea5ae6869d74afa836bbea\": container with ID starting with b846e519ecd99982d700f4e337c123bb064c132476ea5ae6869d74afa836bbea not found: ID does not exist" containerID="b846e519ecd99982d700f4e337c123bb064c132476ea5ae6869d74afa836bbea"
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.716836 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b846e519ecd99982d700f4e337c123bb064c132476ea5ae6869d74afa836bbea"} err="failed to get container status \"b846e519ecd99982d700f4e337c123bb064c132476ea5ae6869d74afa836bbea\": rpc error: code = NotFound desc = could not find container \"b846e519ecd99982d700f4e337c123bb064c132476ea5ae6869d74afa836bbea\": container with ID starting with b846e519ecd99982d700f4e337c123bb064c132476ea5ae6869d74afa836bbea not found: ID does not exist"
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.716902 4967 scope.go:117] "RemoveContainer" containerID="fbfb3179ee5b72c0bdc5eb7f6227163b98a0e399c5ee6a9fcdc4d1ffa54efccf"
Nov 21 17:41:37 crc kubenswrapper[4967]: E1121 17:41:37.717499 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fbfb3179ee5b72c0bdc5eb7f6227163b98a0e399c5ee6a9fcdc4d1ffa54efccf\": container with ID starting with fbfb3179ee5b72c0bdc5eb7f6227163b98a0e399c5ee6a9fcdc4d1ffa54efccf not found: ID does not exist" containerID="fbfb3179ee5b72c0bdc5eb7f6227163b98a0e399c5ee6a9fcdc4d1ffa54efccf"
Nov 21 17:41:37 crc kubenswrapper[4967]: I1121 17:41:37.717624 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fbfb3179ee5b72c0bdc5eb7f6227163b98a0e399c5ee6a9fcdc4d1ffa54efccf"} err="failed to get container status \"fbfb3179ee5b72c0bdc5eb7f6227163b98a0e399c5ee6a9fcdc4d1ffa54efccf\": rpc error: code = NotFound desc = could not find container \"fbfb3179ee5b72c0bdc5eb7f6227163b98a0e399c5ee6a9fcdc4d1ffa54efccf\": container with ID starting with fbfb3179ee5b72c0bdc5eb7f6227163b98a0e399c5ee6a9fcdc4d1ffa54efccf not found: ID does not exist"
Nov 21 17:41:38 crc kubenswrapper[4967]: I1121 17:41:38.578544 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0b51bf0-7366-4dde-96b2-c6c6173ff32a" path="/var/lib/kubelet/pods/b0b51bf0-7366-4dde-96b2-c6c6173ff32a/volumes"
Nov 21 17:41:59 crc kubenswrapper[4967]: I1121 17:41:59.449397 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-nx9q5"]
Nov 21 17:41:59 crc kubenswrapper[4967]: E1121 17:41:59.451770 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0b51bf0-7366-4dde-96b2-c6c6173ff32a" containerName="extract-content"
Nov 21 17:41:59 crc kubenswrapper[4967]: I1121 17:41:59.451796 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0b51bf0-7366-4dde-96b2-c6c6173ff32a" containerName="extract-content"
Nov 21 17:41:59 crc kubenswrapper[4967]: E1121 17:41:59.451819 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0b51bf0-7366-4dde-96b2-c6c6173ff32a" containerName="extract-utilities"
Nov 21 17:41:59 crc kubenswrapper[4967]: I1121 17:41:59.451828 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0b51bf0-7366-4dde-96b2-c6c6173ff32a" containerName="extract-utilities"
Nov 21 17:41:59 crc kubenswrapper[4967]: E1121 17:41:59.451854 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0b51bf0-7366-4dde-96b2-c6c6173ff32a" containerName="registry-server"
Nov 21 17:41:59 crc kubenswrapper[4967]: I1121 17:41:59.451864 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0b51bf0-7366-4dde-96b2-c6c6173ff32a" containerName="registry-server"
Nov 21 17:41:59 crc kubenswrapper[4967]: I1121 17:41:59.452355 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0b51bf0-7366-4dde-96b2-c6c6173ff32a" containerName="registry-server"
Nov 21 17:41:59 crc kubenswrapper[4967]: I1121 17:41:59.455185 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nx9q5"
Nov 21 17:41:59 crc kubenswrapper[4967]: I1121 17:41:59.486352 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nx9q5"]
Nov 21 17:41:59 crc kubenswrapper[4967]: I1121 17:41:59.509632 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad554cb0-66c8-4eff-84d5-205651e5cfd4-catalog-content\") pod \"redhat-marketplace-nx9q5\" (UID: \"ad554cb0-66c8-4eff-84d5-205651e5cfd4\") " pod="openshift-marketplace/redhat-marketplace-nx9q5"
Nov 21 17:41:59 crc kubenswrapper[4967]: I1121 17:41:59.509734 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhr82\" (UniqueName: \"kubernetes.io/projected/ad554cb0-66c8-4eff-84d5-205651e5cfd4-kube-api-access-hhr82\") pod \"redhat-marketplace-nx9q5\" (UID: \"ad554cb0-66c8-4eff-84d5-205651e5cfd4\") " pod="openshift-marketplace/redhat-marketplace-nx9q5"
Nov 21 17:41:59 crc kubenswrapper[4967]: I1121 17:41:59.509940 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad554cb0-66c8-4eff-84d5-205651e5cfd4-utilities\") pod \"redhat-marketplace-nx9q5\" (UID: \"ad554cb0-66c8-4eff-84d5-205651e5cfd4\") " pod="openshift-marketplace/redhat-marketplace-nx9q5"
Nov 21 17:41:59 crc kubenswrapper[4967]: I1121 17:41:59.612706 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad554cb0-66c8-4eff-84d5-205651e5cfd4-catalog-content\") pod \"redhat-marketplace-nx9q5\" (UID: \"ad554cb0-66c8-4eff-84d5-205651e5cfd4\") " pod="openshift-marketplace/redhat-marketplace-nx9q5"
Nov 21 17:41:59 crc kubenswrapper[4967]: I1121 17:41:59.613548 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhr82\" (UniqueName: \"kubernetes.io/projected/ad554cb0-66c8-4eff-84d5-205651e5cfd4-kube-api-access-hhr82\") pod \"redhat-marketplace-nx9q5\" (UID: \"ad554cb0-66c8-4eff-84d5-205651e5cfd4\") " pod="openshift-marketplace/redhat-marketplace-nx9q5"
Nov 21 17:41:59 crc kubenswrapper[4967]: I1121 17:41:59.614081 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad554cb0-66c8-4eff-84d5-205651e5cfd4-catalog-content\") pod \"redhat-marketplace-nx9q5\" (UID: \"ad554cb0-66c8-4eff-84d5-205651e5cfd4\") " pod="openshift-marketplace/redhat-marketplace-nx9q5"
Nov 21 17:41:59 crc kubenswrapper[4967]: I1121 17:41:59.615885 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad554cb0-66c8-4eff-84d5-205651e5cfd4-utilities\") pod \"redhat-marketplace-nx9q5\" (UID: \"ad554cb0-66c8-4eff-84d5-205651e5cfd4\") " pod="openshift-marketplace/redhat-marketplace-nx9q5"
Nov 21 17:41:59 crc kubenswrapper[4967]: I1121 17:41:59.616423 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad554cb0-66c8-4eff-84d5-205651e5cfd4-utilities\") pod \"redhat-marketplace-nx9q5\" (UID: \"ad554cb0-66c8-4eff-84d5-205651e5cfd4\") " pod="openshift-marketplace/redhat-marketplace-nx9q5"
Nov 21 17:41:59 crc kubenswrapper[4967]: I1121 17:41:59.638378 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhr82\" (UniqueName: \"kubernetes.io/projected/ad554cb0-66c8-4eff-84d5-205651e5cfd4-kube-api-access-hhr82\") pod \"redhat-marketplace-nx9q5\" (UID: \"ad554cb0-66c8-4eff-84d5-205651e5cfd4\") " pod="openshift-marketplace/redhat-marketplace-nx9q5"
Nov 21 17:41:59 crc kubenswrapper[4967]: I1121 17:41:59.792191 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nx9q5"
Nov 21 17:42:00 crc kubenswrapper[4967]: I1121 17:42:00.368045 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nx9q5"]
Nov 21 17:42:01 crc kubenswrapper[4967]: I1121 17:42:01.004098 4967 generic.go:334] "Generic (PLEG): container finished" podID="ad554cb0-66c8-4eff-84d5-205651e5cfd4" containerID="9db18067f869c433fc8fab9d55a79a88da02f153836eeada8c99e7b8bd59010e" exitCode=0
Nov 21 17:42:01 crc kubenswrapper[4967]: I1121 17:42:01.004643 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nx9q5" event={"ID":"ad554cb0-66c8-4eff-84d5-205651e5cfd4","Type":"ContainerDied","Data":"9db18067f869c433fc8fab9d55a79a88da02f153836eeada8c99e7b8bd59010e"}
Nov 21 17:42:01 crc kubenswrapper[4967]: I1121 17:42:01.004705 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nx9q5" event={"ID":"ad554cb0-66c8-4eff-84d5-205651e5cfd4","Type":"ContainerStarted","Data":"04835d6586bfb6fd63966e3abc1ec9aa258a1ba961ac3899aaa0cb71139126e6"}
Nov 21 17:42:03 crc kubenswrapper[4967]: I1121 17:42:03.032892 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nx9q5" event={"ID":"ad554cb0-66c8-4eff-84d5-205651e5cfd4","Type":"ContainerStarted","Data":"caca8f068f54a411d7d360642971972e52c1e8d8130233bd934f6ac2d382920c"}
Nov 21 17:42:04 crc kubenswrapper[4967]: I1121 17:42:04.045336 4967 generic.go:334] "Generic (PLEG): container finished" podID="ad554cb0-66c8-4eff-84d5-205651e5cfd4" containerID="caca8f068f54a411d7d360642971972e52c1e8d8130233bd934f6ac2d382920c" exitCode=0
Nov 21 17:42:04 crc kubenswrapper[4967]: I1121 17:42:04.045777 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nx9q5" event={"ID":"ad554cb0-66c8-4eff-84d5-205651e5cfd4","Type":"ContainerDied","Data":"caca8f068f54a411d7d360642971972e52c1e8d8130233bd934f6ac2d382920c"}
Nov 21 17:42:05 crc kubenswrapper[4967]: I1121 17:42:05.065980 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nx9q5" event={"ID":"ad554cb0-66c8-4eff-84d5-205651e5cfd4","Type":"ContainerStarted","Data":"847953a7e80ffe904bc6ae6ff9a082b0b6254f2a473a49b65f96b2e3b2aa5161"}
Nov 21 17:42:05 crc kubenswrapper[4967]: I1121 17:42:05.110067 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-nx9q5" podStartSLOduration=2.454852006 podStartE2EDuration="6.11003777s" podCreationTimestamp="2025-11-21 17:41:59 +0000 UTC" firstStartedPulling="2025-11-21 17:42:01.016220543 +0000 UTC m=+7609.274741561" lastFinishedPulling="2025-11-21 17:42:04.671406287 +0000 UTC m=+7612.929927325" observedRunningTime="2025-11-21 17:42:05.089533623 +0000 UTC m=+7613.348054631" watchObservedRunningTime="2025-11-21 17:42:05.11003777 +0000 UTC m=+7613.368558788"
Nov 21 17:42:09 crc kubenswrapper[4967]: I1121 17:42:09.792740 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-nx9q5"
Nov 21 17:42:09 crc kubenswrapper[4967]: I1121 17:42:09.793442 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-nx9q5"
Nov 21 17:42:09 crc kubenswrapper[4967]: I1121 17:42:09.881906 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-nx9q5"
Nov 21 17:42:10 crc kubenswrapper[4967]: I1121 17:42:10.194761 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-nx9q5"
Nov 21 17:42:10 crc kubenswrapper[4967]: I1121 17:42:10.253900 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nx9q5"]
Nov 21 17:42:12 crc kubenswrapper[4967]: I1121 17:42:12.150362 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-nx9q5" podUID="ad554cb0-66c8-4eff-84d5-205651e5cfd4" containerName="registry-server" containerID="cri-o://847953a7e80ffe904bc6ae6ff9a082b0b6254f2a473a49b65f96b2e3b2aa5161" gracePeriod=2
Nov 21 17:42:12 crc kubenswrapper[4967]: I1121 17:42:12.744156 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nx9q5"
Nov 21 17:42:12 crc kubenswrapper[4967]: I1121 17:42:12.858174 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad554cb0-66c8-4eff-84d5-205651e5cfd4-catalog-content\") pod \"ad554cb0-66c8-4eff-84d5-205651e5cfd4\" (UID: \"ad554cb0-66c8-4eff-84d5-205651e5cfd4\") "
Nov 21 17:42:12 crc kubenswrapper[4967]: I1121 17:42:12.858443 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad554cb0-66c8-4eff-84d5-205651e5cfd4-utilities\") pod \"ad554cb0-66c8-4eff-84d5-205651e5cfd4\" (UID: \"ad554cb0-66c8-4eff-84d5-205651e5cfd4\") "
Nov 21 17:42:12 crc kubenswrapper[4967]: I1121 17:42:12.858583 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hhr82\" (UniqueName: \"kubernetes.io/projected/ad554cb0-66c8-4eff-84d5-205651e5cfd4-kube-api-access-hhr82\") pod \"ad554cb0-66c8-4eff-84d5-205651e5cfd4\" (UID: \"ad554cb0-66c8-4eff-84d5-205651e5cfd4\") "
Nov 21 17:42:12 crc kubenswrapper[4967]: I1121 17:42:12.859436 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad554cb0-66c8-4eff-84d5-205651e5cfd4-utilities" (OuterVolumeSpecName: "utilities") pod "ad554cb0-66c8-4eff-84d5-205651e5cfd4" (UID: "ad554cb0-66c8-4eff-84d5-205651e5cfd4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 17:42:12 crc kubenswrapper[4967]: I1121 17:42:12.869089 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad554cb0-66c8-4eff-84d5-205651e5cfd4-kube-api-access-hhr82" (OuterVolumeSpecName: "kube-api-access-hhr82") pod "ad554cb0-66c8-4eff-84d5-205651e5cfd4" (UID: "ad554cb0-66c8-4eff-84d5-205651e5cfd4"). InnerVolumeSpecName "kube-api-access-hhr82". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 17:42:12 crc kubenswrapper[4967]: I1121 17:42:12.961755 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hhr82\" (UniqueName: \"kubernetes.io/projected/ad554cb0-66c8-4eff-84d5-205651e5cfd4-kube-api-access-hhr82\") on node \"crc\" DevicePath \"\""
Nov 21 17:42:12 crc kubenswrapper[4967]: I1121 17:42:12.961798 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad554cb0-66c8-4eff-84d5-205651e5cfd4-utilities\") on node \"crc\" DevicePath \"\""
Nov 21 17:42:13 crc kubenswrapper[4967]: I1121 17:42:13.136755 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad554cb0-66c8-4eff-84d5-205651e5cfd4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ad554cb0-66c8-4eff-84d5-205651e5cfd4" (UID: "ad554cb0-66c8-4eff-84d5-205651e5cfd4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 17:42:13 crc kubenswrapper[4967]: I1121 17:42:13.163928 4967 generic.go:334] "Generic (PLEG): container finished" podID="ad554cb0-66c8-4eff-84d5-205651e5cfd4" containerID="847953a7e80ffe904bc6ae6ff9a082b0b6254f2a473a49b65f96b2e3b2aa5161" exitCode=0
Nov 21 17:42:13 crc kubenswrapper[4967]: I1121 17:42:13.163993 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nx9q5" event={"ID":"ad554cb0-66c8-4eff-84d5-205651e5cfd4","Type":"ContainerDied","Data":"847953a7e80ffe904bc6ae6ff9a082b0b6254f2a473a49b65f96b2e3b2aa5161"}
Nov 21 17:42:13 crc kubenswrapper[4967]: I1121 17:42:13.164031 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nx9q5" event={"ID":"ad554cb0-66c8-4eff-84d5-205651e5cfd4","Type":"ContainerDied","Data":"04835d6586bfb6fd63966e3abc1ec9aa258a1ba961ac3899aaa0cb71139126e6"}
Nov 21 17:42:13 crc kubenswrapper[4967]: I1121 17:42:13.164055 4967 scope.go:117] "RemoveContainer" containerID="847953a7e80ffe904bc6ae6ff9a082b0b6254f2a473a49b65f96b2e3b2aa5161"
Nov 21 17:42:13 crc kubenswrapper[4967]: I1121 17:42:13.164272 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nx9q5"
Nov 21 17:42:13 crc kubenswrapper[4967]: I1121 17:42:13.170244 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad554cb0-66c8-4eff-84d5-205651e5cfd4-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 21 17:42:13 crc kubenswrapper[4967]: I1121 17:42:13.253214 4967 scope.go:117] "RemoveContainer" containerID="caca8f068f54a411d7d360642971972e52c1e8d8130233bd934f6ac2d382920c"
Nov 21 17:42:13 crc kubenswrapper[4967]: I1121 17:42:13.265104 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nx9q5"]
Nov 21 17:42:13 crc kubenswrapper[4967]: I1121 17:42:13.278447 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-nx9q5"]
Nov 21 17:42:13 crc kubenswrapper[4967]: I1121 17:42:13.348846 4967 scope.go:117] "RemoveContainer" containerID="9db18067f869c433fc8fab9d55a79a88da02f153836eeada8c99e7b8bd59010e"
Nov 21 17:42:13 crc kubenswrapper[4967]: I1121 17:42:13.389968 4967 scope.go:117] "RemoveContainer" containerID="847953a7e80ffe904bc6ae6ff9a082b0b6254f2a473a49b65f96b2e3b2aa5161"
Nov 21 17:42:13 crc kubenswrapper[4967]: E1121 17:42:13.390442 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"847953a7e80ffe904bc6ae6ff9a082b0b6254f2a473a49b65f96b2e3b2aa5161\": container with ID starting with 847953a7e80ffe904bc6ae6ff9a082b0b6254f2a473a49b65f96b2e3b2aa5161 not found: ID does not exist" containerID="847953a7e80ffe904bc6ae6ff9a082b0b6254f2a473a49b65f96b2e3b2aa5161"
Nov 21 17:42:13 crc kubenswrapper[4967]: I1121 17:42:13.390477 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"847953a7e80ffe904bc6ae6ff9a082b0b6254f2a473a49b65f96b2e3b2aa5161"} err="failed to get container status \"847953a7e80ffe904bc6ae6ff9a082b0b6254f2a473a49b65f96b2e3b2aa5161\": rpc error: code = NotFound desc = could not find container \"847953a7e80ffe904bc6ae6ff9a082b0b6254f2a473a49b65f96b2e3b2aa5161\": container with ID starting with 847953a7e80ffe904bc6ae6ff9a082b0b6254f2a473a49b65f96b2e3b2aa5161 not found: ID does not exist"
Nov 21 17:42:13 crc kubenswrapper[4967]: I1121 17:42:13.390511 4967 scope.go:117] "RemoveContainer" containerID="caca8f068f54a411d7d360642971972e52c1e8d8130233bd934f6ac2d382920c"
Nov 21 17:42:13 crc kubenswrapper[4967]: E1121 17:42:13.390734 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"caca8f068f54a411d7d360642971972e52c1e8d8130233bd934f6ac2d382920c\": container with ID starting with caca8f068f54a411d7d360642971972e52c1e8d8130233bd934f6ac2d382920c not found: ID does not exist" containerID="caca8f068f54a411d7d360642971972e52c1e8d8130233bd934f6ac2d382920c"
Nov 21 17:42:13 crc kubenswrapper[4967]: I1121 17:42:13.390756 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"caca8f068f54a411d7d360642971972e52c1e8d8130233bd934f6ac2d382920c"} err="failed to get container status \"caca8f068f54a411d7d360642971972e52c1e8d8130233bd934f6ac2d382920c\": rpc error: code = NotFound desc = could not find container \"caca8f068f54a411d7d360642971972e52c1e8d8130233bd934f6ac2d382920c\": container with ID starting with caca8f068f54a411d7d360642971972e52c1e8d8130233bd934f6ac2d382920c not found: ID does not exist"
Nov 21 17:42:13 crc kubenswrapper[4967]: I1121 17:42:13.390772 4967 scope.go:117] "RemoveContainer" containerID="9db18067f869c433fc8fab9d55a79a88da02f153836eeada8c99e7b8bd59010e"
Nov 21 17:42:13 crc kubenswrapper[4967]: E1121 17:42:13.390974 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9db18067f869c433fc8fab9d55a79a88da02f153836eeada8c99e7b8bd59010e\": container with ID starting with 9db18067f869c433fc8fab9d55a79a88da02f153836eeada8c99e7b8bd59010e not found: ID does not exist" containerID="9db18067f869c433fc8fab9d55a79a88da02f153836eeada8c99e7b8bd59010e"
Nov 21 17:42:13 crc kubenswrapper[4967]: I1121 17:42:13.390994 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9db18067f869c433fc8fab9d55a79a88da02f153836eeada8c99e7b8bd59010e"} err="failed to get container status \"9db18067f869c433fc8fab9d55a79a88da02f153836eeada8c99e7b8bd59010e\": rpc error: code = NotFound desc = could not find container \"9db18067f869c433fc8fab9d55a79a88da02f153836eeada8c99e7b8bd59010e\": container with ID starting with 9db18067f869c433fc8fab9d55a79a88da02f153836eeada8c99e7b8bd59010e not found: ID does not exist"
Nov 21 17:42:14 crc kubenswrapper[4967]: I1121 17:42:14.551398 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad554cb0-66c8-4eff-84d5-205651e5cfd4" path="/var/lib/kubelet/pods/ad554cb0-66c8-4eff-84d5-205651e5cfd4/volumes"
Nov 21 17:42:16 crc kubenswrapper[4967]: I1121 17:42:16.523857 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 21 17:42:16 crc kubenswrapper[4967]: I1121 17:42:16.524200 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 21 17:42:46 crc kubenswrapper[4967]: I1121 17:42:46.522723 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 21 17:42:46 crc kubenswrapper[4967]: I1121 17:42:46.523895 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 21 17:42:58 crc kubenswrapper[4967]: I1121 17:42:58.677258 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zx94r"]
Nov 21 17:42:58 crc kubenswrapper[4967]: E1121 17:42:58.679071 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad554cb0-66c8-4eff-84d5-205651e5cfd4" containerName="extract-content"
Nov 21 17:42:58 crc kubenswrapper[4967]: I1121 17:42:58.679136 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad554cb0-66c8-4eff-84d5-205651e5cfd4" containerName="extract-content"
Nov 21 17:42:58 crc kubenswrapper[4967]: E1121 17:42:58.679197 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad554cb0-66c8-4eff-84d5-205651e5cfd4" containerName="registry-server"
Nov 21 17:42:58 crc kubenswrapper[4967]: I1121 17:42:58.679213 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad554cb0-66c8-4eff-84d5-205651e5cfd4" containerName="registry-server"
Nov 21 17:42:58 crc kubenswrapper[4967]: E1121 17:42:58.679254 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad554cb0-66c8-4eff-84d5-205651e5cfd4" containerName="extract-utilities"
Nov 21 17:42:58 crc kubenswrapper[4967]: I1121 17:42:58.679265 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad554cb0-66c8-4eff-84d5-205651e5cfd4" containerName="extract-utilities"
Nov 21 17:42:58 crc kubenswrapper[4967]: I1121 17:42:58.679712 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad554cb0-66c8-4eff-84d5-205651e5cfd4" containerName="registry-server"
Nov 21 17:42:58 crc kubenswrapper[4967]: I1121 17:42:58.684874 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zx94r"
Nov 21 17:42:58 crc kubenswrapper[4967]: I1121 17:42:58.690659 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zx94r"]
Nov 21 17:42:58 crc kubenswrapper[4967]: I1121 17:42:58.821520 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb3c4d5d-a672-496a-8b73-f44a75fb0b4f-catalog-content\") pod \"redhat-operators-zx94r\" (UID: \"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f\") " pod="openshift-marketplace/redhat-operators-zx94r"
Nov 21 17:42:58 crc kubenswrapper[4967]: I1121 17:42:58.821583 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb3c4d5d-a672-496a-8b73-f44a75fb0b4f-utilities\") pod \"redhat-operators-zx94r\" (UID: \"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f\") " pod="openshift-marketplace/redhat-operators-zx94r"
Nov 21 17:42:58 crc kubenswrapper[4967]: I1121 17:42:58.821770 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnrdd\" (UniqueName: \"kubernetes.io/projected/eb3c4d5d-a672-496a-8b73-f44a75fb0b4f-kube-api-access-jnrdd\") pod \"redhat-operators-zx94r\" (UID: \"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f\") " pod="openshift-marketplace/redhat-operators-zx94r"
Nov 21 17:42:58 crc kubenswrapper[4967]: I1121 17:42:58.924606 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnrdd\" (UniqueName: \"kubernetes.io/projected/eb3c4d5d-a672-496a-8b73-f44a75fb0b4f-kube-api-access-jnrdd\") pod \"redhat-operators-zx94r\" (UID: \"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f\") " pod="openshift-marketplace/redhat-operators-zx94r"
Nov 21 17:42:58 crc kubenswrapper[4967]: I1121 17:42:58.924768 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb3c4d5d-a672-496a-8b73-f44a75fb0b4f-catalog-content\") pod \"redhat-operators-zx94r\" (UID: \"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f\") " pod="openshift-marketplace/redhat-operators-zx94r"
Nov 21 17:42:58 crc kubenswrapper[4967]: I1121 17:42:58.924798 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb3c4d5d-a672-496a-8b73-f44a75fb0b4f-utilities\") pod \"redhat-operators-zx94r\" (UID: \"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f\") " pod="openshift-marketplace/redhat-operators-zx94r"
Nov 21 17:42:58 crc kubenswrapper[4967]: I1121 17:42:58.925600 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb3c4d5d-a672-496a-8b73-f44a75fb0b4f-catalog-content\") pod \"redhat-operators-zx94r\" (UID: \"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f\") " pod="openshift-marketplace/redhat-operators-zx94r"
Nov 21 17:42:58 crc kubenswrapper[4967]: I1121 17:42:58.925619 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb3c4d5d-a672-496a-8b73-f44a75fb0b4f-utilities\") pod \"redhat-operators-zx94r\" (UID: \"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f\") " pod="openshift-marketplace/redhat-operators-zx94r"
Nov 21 17:42:58 crc kubenswrapper[4967]: I1121 17:42:58.952094 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jnrdd\" (UniqueName: \"kubernetes.io/projected/eb3c4d5d-a672-496a-8b73-f44a75fb0b4f-kube-api-access-jnrdd\") pod \"redhat-operators-zx94r\" (UID: \"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f\") " pod="openshift-marketplace/redhat-operators-zx94r"
Nov 21 17:42:59 crc kubenswrapper[4967]: I1121 17:42:59.042501 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zx94r"
Nov 21 17:42:59 crc kubenswrapper[4967]: I1121 17:42:59.611467 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zx94r"]
Nov 21 17:42:59 crc kubenswrapper[4967]: I1121 17:42:59.908949 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zx94r" event={"ID":"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f","Type":"ContainerStarted","Data":"d05d8128ac7adaf39828d1bc496199f95ea1e6779516a47751ac4e75e40ce739"}
Nov 21 17:43:00 crc kubenswrapper[4967]: I1121 17:43:00.926344 4967 generic.go:334] "Generic (PLEG): container finished" podID="eb3c4d5d-a672-496a-8b73-f44a75fb0b4f" containerID="0902b8ddaca14969271efa9fe1b7fde8993c3a6461ef42feab86d468923b996d" exitCode=0
Nov 21 17:43:00 crc kubenswrapper[4967]: I1121 17:43:00.926434 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zx94r" event={"ID":"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f","Type":"ContainerDied","Data":"0902b8ddaca14969271efa9fe1b7fde8993c3a6461ef42feab86d468923b996d"}
Nov 21 17:43:02 crc kubenswrapper[4967]: I1121 17:43:02.959761 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zx94r" event={"ID":"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f","Type":"ContainerStarted","Data":"b414260572a46f62205d81e62598e4c3497c57dbe5c7cb6b2185f6b02dcc714f"}
Nov 21 17:43:08 crc kubenswrapper[4967]: I1121 17:43:08.056494 4967 generic.go:334] "Generic (PLEG): container finished" podID="eb3c4d5d-a672-496a-8b73-f44a75fb0b4f" containerID="b414260572a46f62205d81e62598e4c3497c57dbe5c7cb6b2185f6b02dcc714f" exitCode=0
Nov 21 17:43:08 crc kubenswrapper[4967]: I1121 17:43:08.056579 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zx94r" event={"ID":"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f","Type":"ContainerDied","Data":"b414260572a46f62205d81e62598e4c3497c57dbe5c7cb6b2185f6b02dcc714f"}
Nov 21 17:43:09 crc kubenswrapper[4967]: I1121 17:43:09.072801 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zx94r" event={"ID":"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f","Type":"ContainerStarted","Data":"edfb64ba30c0acf50169542084d8847924a1c8bcefb832140b4d3a9c1bd4d63c"}
Nov 21 17:43:16 crc kubenswrapper[4967]: I1121 17:43:16.522496 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 21 17:43:16 crc kubenswrapper[4967]: I1121 17:43:16.523395 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 21 17:43:16 crc kubenswrapper[4967]: I1121 17:43:16.523468 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2"
Nov 21 17:43:16 crc kubenswrapper[4967]: I1121 17:43:16.525085 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"67a21db30f97c1a97d8586d35df714af60a429d9fe77042ad42a28e73c63ab91"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 21 17:43:16 crc kubenswrapper[4967]: I1121 17:43:16.525150 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://67a21db30f97c1a97d8586d35df714af60a429d9fe77042ad42a28e73c63ab91" gracePeriod=600
Nov 21 17:43:17 crc kubenswrapper[4967]: I1121 17:43:17.237589 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="67a21db30f97c1a97d8586d35df714af60a429d9fe77042ad42a28e73c63ab91" exitCode=0
Nov 21 17:43:17 crc kubenswrapper[4967]: I1121 17:43:17.237706 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"67a21db30f97c1a97d8586d35df714af60a429d9fe77042ad42a28e73c63ab91"}
Nov 21 17:43:17 crc kubenswrapper[4967]: I1121 17:43:17.238540 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"}
Nov 21 17:43:17 crc kubenswrapper[4967]: I1121 17:43:17.238599 4967 scope.go:117] "RemoveContainer" containerID="84446e892e997eb221b0963506a55d45374406b3ccd1574fe2e0eb53b4553cce"
Nov 21 17:43:17 crc kubenswrapper[4967]: I1121 17:43:17.279564 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zx94r" podStartSLOduration=11.718270495 podStartE2EDuration="19.279524647s" podCreationTimestamp="2025-11-21 17:42:58 +0000 UTC" firstStartedPulling="2025-11-21 17:43:00.930954753 +0000 UTC m=+7669.189475801" lastFinishedPulling="2025-11-21 17:43:08.492208925 +0000 UTC m=+7676.750729953" observedRunningTime="2025-11-21 17:43:09.102290919 +0000 UTC m=+7677.360811927" watchObservedRunningTime="2025-11-21 17:43:17.279524647 +0000 UTC m=+7685.538045685"
Nov 21 17:43:19 crc kubenswrapper[4967]: I1121 17:43:19.042955 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zx94r"
Nov 21 17:43:19 crc kubenswrapper[4967]: I1121 17:43:19.044972 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zx94r"
Nov 21 17:43:20 crc kubenswrapper[4967]: I1121 17:43:20.114225 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zx94r" podUID="eb3c4d5d-a672-496a-8b73-f44a75fb0b4f" containerName="registry-server" probeResult="failure" output=<
Nov 21 17:43:20 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s
Nov 21 17:43:20 crc kubenswrapper[4967]: >
Nov 21 17:43:29 crc kubenswrapper[4967]: I1121 17:43:29.104004 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zx94r"
Nov 21 17:43:29 crc kubenswrapper[4967]: I1121 17:43:29.158629 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zx94r"
Nov 21 17:43:29 crc kubenswrapper[4967]: I1121 17:43:29.874636 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zx94r"]
Nov 21 17:43:30 crc kubenswrapper[4967]: I1121 17:43:30.448283 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zx94r" podUID="eb3c4d5d-a672-496a-8b73-f44a75fb0b4f" containerName="registry-server" containerID="cri-o://edfb64ba30c0acf50169542084d8847924a1c8bcefb832140b4d3a9c1bd4d63c" gracePeriod=2
Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.142723 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zx94r"
Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.219508 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jnrdd\" (UniqueName: \"kubernetes.io/projected/eb3c4d5d-a672-496a-8b73-f44a75fb0b4f-kube-api-access-jnrdd\") pod \"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f\" (UID: \"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f\") "
Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.219935 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb3c4d5d-a672-496a-8b73-f44a75fb0b4f-utilities\") pod \"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f\" (UID: \"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f\") "
Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.220074 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb3c4d5d-a672-496a-8b73-f44a75fb0b4f-catalog-content\") pod \"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f\" (UID: \"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f\") "
Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.220910 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb3c4d5d-a672-496a-8b73-f44a75fb0b4f-utilities" (OuterVolumeSpecName: "utilities") pod "eb3c4d5d-a672-496a-8b73-f44a75fb0b4f" (UID: "eb3c4d5d-a672-496a-8b73-f44a75fb0b4f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.227283 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb3c4d5d-a672-496a-8b73-f44a75fb0b4f-kube-api-access-jnrdd" (OuterVolumeSpecName: "kube-api-access-jnrdd") pod "eb3c4d5d-a672-496a-8b73-f44a75fb0b4f" (UID: "eb3c4d5d-a672-496a-8b73-f44a75fb0b4f"). InnerVolumeSpecName "kube-api-access-jnrdd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.323708 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jnrdd\" (UniqueName: \"kubernetes.io/projected/eb3c4d5d-a672-496a-8b73-f44a75fb0b4f-kube-api-access-jnrdd\") on node \"crc\" DevicePath \"\""
Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.324145 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb3c4d5d-a672-496a-8b73-f44a75fb0b4f-utilities\") on node \"crc\" DevicePath \"\""
Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.325148 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb3c4d5d-a672-496a-8b73-f44a75fb0b4f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eb3c4d5d-a672-496a-8b73-f44a75fb0b4f" (UID: "eb3c4d5d-a672-496a-8b73-f44a75fb0b4f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.427947 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb3c4d5d-a672-496a-8b73-f44a75fb0b4f-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.464388 4967 generic.go:334] "Generic (PLEG): container finished" podID="eb3c4d5d-a672-496a-8b73-f44a75fb0b4f" containerID="edfb64ba30c0acf50169542084d8847924a1c8bcefb832140b4d3a9c1bd4d63c" exitCode=0
Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.464464 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zx94r" event={"ID":"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f","Type":"ContainerDied","Data":"edfb64ba30c0acf50169542084d8847924a1c8bcefb832140b4d3a9c1bd4d63c"}
Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.464524 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zx94r" event={"ID":"eb3c4d5d-a672-496a-8b73-f44a75fb0b4f","Type":"ContainerDied","Data":"d05d8128ac7adaf39828d1bc496199f95ea1e6779516a47751ac4e75e40ce739"}
Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.464572 4967 scope.go:117] "RemoveContainer" containerID="edfb64ba30c0acf50169542084d8847924a1c8bcefb832140b4d3a9c1bd4d63c"
Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.464940 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zx94r" Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.503967 4967 scope.go:117] "RemoveContainer" containerID="b414260572a46f62205d81e62598e4c3497c57dbe5c7cb6b2185f6b02dcc714f" Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.527767 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zx94r"] Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.544507 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zx94r"] Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.555463 4967 scope.go:117] "RemoveContainer" containerID="0902b8ddaca14969271efa9fe1b7fde8993c3a6461ef42feab86d468923b996d" Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.618716 4967 scope.go:117] "RemoveContainer" containerID="edfb64ba30c0acf50169542084d8847924a1c8bcefb832140b4d3a9c1bd4d63c" Nov 21 17:43:31 crc kubenswrapper[4967]: E1121 17:43:31.619358 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"edfb64ba30c0acf50169542084d8847924a1c8bcefb832140b4d3a9c1bd4d63c\": container with ID starting with edfb64ba30c0acf50169542084d8847924a1c8bcefb832140b4d3a9c1bd4d63c not found: ID does not exist" containerID="edfb64ba30c0acf50169542084d8847924a1c8bcefb832140b4d3a9c1bd4d63c" Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.619423 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edfb64ba30c0acf50169542084d8847924a1c8bcefb832140b4d3a9c1bd4d63c"} err="failed to get container status \"edfb64ba30c0acf50169542084d8847924a1c8bcefb832140b4d3a9c1bd4d63c\": rpc error: code = NotFound desc = could not find container \"edfb64ba30c0acf50169542084d8847924a1c8bcefb832140b4d3a9c1bd4d63c\": container with ID starting with edfb64ba30c0acf50169542084d8847924a1c8bcefb832140b4d3a9c1bd4d63c not found: ID does not exist" Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.619459 4967 scope.go:117] "RemoveContainer" containerID="b414260572a46f62205d81e62598e4c3497c57dbe5c7cb6b2185f6b02dcc714f" Nov 21 17:43:31 crc kubenswrapper[4967]: E1121 17:43:31.620428 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b414260572a46f62205d81e62598e4c3497c57dbe5c7cb6b2185f6b02dcc714f\": container with ID starting with b414260572a46f62205d81e62598e4c3497c57dbe5c7cb6b2185f6b02dcc714f not found: ID does not exist" containerID="b414260572a46f62205d81e62598e4c3497c57dbe5c7cb6b2185f6b02dcc714f" Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.620488 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b414260572a46f62205d81e62598e4c3497c57dbe5c7cb6b2185f6b02dcc714f"} err="failed to get container status \"b414260572a46f62205d81e62598e4c3497c57dbe5c7cb6b2185f6b02dcc714f\": rpc error: code = NotFound desc = could not find container \"b414260572a46f62205d81e62598e4c3497c57dbe5c7cb6b2185f6b02dcc714f\": container with ID starting with b414260572a46f62205d81e62598e4c3497c57dbe5c7cb6b2185f6b02dcc714f not found: ID does not exist" Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.620518 4967 scope.go:117] "RemoveContainer" containerID="0902b8ddaca14969271efa9fe1b7fde8993c3a6461ef42feab86d468923b996d" Nov 21 17:43:31 crc kubenswrapper[4967]: E1121 17:43:31.620757 4967 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"0902b8ddaca14969271efa9fe1b7fde8993c3a6461ef42feab86d468923b996d\": container with ID starting with 0902b8ddaca14969271efa9fe1b7fde8993c3a6461ef42feab86d468923b996d not found: ID does not exist" containerID="0902b8ddaca14969271efa9fe1b7fde8993c3a6461ef42feab86d468923b996d" Nov 21 17:43:31 crc kubenswrapper[4967]: I1121 17:43:31.620800 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0902b8ddaca14969271efa9fe1b7fde8993c3a6461ef42feab86d468923b996d"} err="failed to get container status \"0902b8ddaca14969271efa9fe1b7fde8993c3a6461ef42feab86d468923b996d\": rpc error: code = NotFound desc = could not find container \"0902b8ddaca14969271efa9fe1b7fde8993c3a6461ef42feab86d468923b996d\": container with ID starting with 0902b8ddaca14969271efa9fe1b7fde8993c3a6461ef42feab86d468923b996d not found: ID does not exist" Nov 21 17:43:32 crc kubenswrapper[4967]: I1121 17:43:32.552979 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb3c4d5d-a672-496a-8b73-f44a75fb0b4f" path="/var/lib/kubelet/pods/eb3c4d5d-a672-496a-8b73-f44a75fb0b4f/volumes" Nov 21 17:43:37 crc kubenswrapper[4967]: I1121 17:43:37.915378 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xngf8"] Nov 21 17:43:37 crc kubenswrapper[4967]: E1121 17:43:37.916981 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb3c4d5d-a672-496a-8b73-f44a75fb0b4f" containerName="extract-utilities" Nov 21 17:43:37 crc kubenswrapper[4967]: I1121 17:43:37.916998 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb3c4d5d-a672-496a-8b73-f44a75fb0b4f" containerName="extract-utilities" Nov 21 17:43:37 crc kubenswrapper[4967]: E1121 17:43:37.917066 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb3c4d5d-a672-496a-8b73-f44a75fb0b4f" containerName="registry-server" Nov 21 17:43:37 crc kubenswrapper[4967]: I1121 17:43:37.917074 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb3c4d5d-a672-496a-8b73-f44a75fb0b4f" containerName="registry-server" Nov 21 17:43:37 crc kubenswrapper[4967]: E1121 17:43:37.917091 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb3c4d5d-a672-496a-8b73-f44a75fb0b4f" containerName="extract-content" Nov 21 17:43:37 crc kubenswrapper[4967]: I1121 17:43:37.917097 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb3c4d5d-a672-496a-8b73-f44a75fb0b4f" containerName="extract-content" Nov 21 17:43:37 crc kubenswrapper[4967]: I1121 17:43:37.917417 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb3c4d5d-a672-496a-8b73-f44a75fb0b4f" containerName="registry-server" Nov 21 17:43:37 crc kubenswrapper[4967]: I1121 17:43:37.923563 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xngf8" Nov 21 17:43:37 crc kubenswrapper[4967]: I1121 17:43:37.935200 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xngf8"] Nov 21 17:43:37 crc kubenswrapper[4967]: I1121 17:43:37.935406 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/658180e9-bba5-41c6-91f7-cd059a63fd32-utilities\") pod \"community-operators-xngf8\" (UID: \"658180e9-bba5-41c6-91f7-cd059a63fd32\") " pod="openshift-marketplace/community-operators-xngf8" Nov 21 17:43:37 crc kubenswrapper[4967]: I1121 17:43:37.935470 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6l4v\" (UniqueName: \"kubernetes.io/projected/658180e9-bba5-41c6-91f7-cd059a63fd32-kube-api-access-r6l4v\") pod \"community-operators-xngf8\" (UID: \"658180e9-bba5-41c6-91f7-cd059a63fd32\") " pod="openshift-marketplace/community-operators-xngf8" Nov 21 17:43:37 crc kubenswrapper[4967]: I1121 17:43:37.935611 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/658180e9-bba5-41c6-91f7-cd059a63fd32-catalog-content\") pod \"community-operators-xngf8\" (UID: \"658180e9-bba5-41c6-91f7-cd059a63fd32\") " pod="openshift-marketplace/community-operators-xngf8" Nov 21 17:43:38 crc kubenswrapper[4967]: I1121 17:43:38.039289 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/658180e9-bba5-41c6-91f7-cd059a63fd32-utilities\") pod \"community-operators-xngf8\" (UID: \"658180e9-bba5-41c6-91f7-cd059a63fd32\") " pod="openshift-marketplace/community-operators-xngf8" Nov 21 17:43:38 crc kubenswrapper[4967]: I1121 17:43:38.039375 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r6l4v\" (UniqueName: \"kubernetes.io/projected/658180e9-bba5-41c6-91f7-cd059a63fd32-kube-api-access-r6l4v\") pod \"community-operators-xngf8\" (UID: \"658180e9-bba5-41c6-91f7-cd059a63fd32\") " pod="openshift-marketplace/community-operators-xngf8" Nov 21 17:43:38 crc kubenswrapper[4967]: I1121 17:43:38.039547 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/658180e9-bba5-41c6-91f7-cd059a63fd32-catalog-content\") pod \"community-operators-xngf8\" (UID: \"658180e9-bba5-41c6-91f7-cd059a63fd32\") " pod="openshift-marketplace/community-operators-xngf8" Nov 21 17:43:38 crc kubenswrapper[4967]: I1121 17:43:38.040057 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/658180e9-bba5-41c6-91f7-cd059a63fd32-catalog-content\") pod \"community-operators-xngf8\" (UID: \"658180e9-bba5-41c6-91f7-cd059a63fd32\") " pod="openshift-marketplace/community-operators-xngf8" Nov 21 17:43:38 crc kubenswrapper[4967]: I1121 17:43:38.041466 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/658180e9-bba5-41c6-91f7-cd059a63fd32-utilities\") pod \"community-operators-xngf8\" (UID: \"658180e9-bba5-41c6-91f7-cd059a63fd32\") " pod="openshift-marketplace/community-operators-xngf8" Nov 21 17:43:38 crc kubenswrapper[4967]: I1121 17:43:38.073737 4967 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-r6l4v\" (UniqueName: \"kubernetes.io/projected/658180e9-bba5-41c6-91f7-cd059a63fd32-kube-api-access-r6l4v\") pod \"community-operators-xngf8\" (UID: \"658180e9-bba5-41c6-91f7-cd059a63fd32\") " pod="openshift-marketplace/community-operators-xngf8" Nov 21 17:43:38 crc kubenswrapper[4967]: I1121 17:43:38.274578 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xngf8" Nov 21 17:43:38 crc kubenswrapper[4967]: I1121 17:43:38.887840 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xngf8"] Nov 21 17:43:39 crc kubenswrapper[4967]: I1121 17:43:39.636695 4967 generic.go:334] "Generic (PLEG): container finished" podID="658180e9-bba5-41c6-91f7-cd059a63fd32" containerID="6be9a46c5e0dc4546c6f067439501da799be5564b58309f479fdc3334927894c" exitCode=0 Nov 21 17:43:39 crc kubenswrapper[4967]: I1121 17:43:39.637104 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xngf8" event={"ID":"658180e9-bba5-41c6-91f7-cd059a63fd32","Type":"ContainerDied","Data":"6be9a46c5e0dc4546c6f067439501da799be5564b58309f479fdc3334927894c"} Nov 21 17:43:39 crc kubenswrapper[4967]: I1121 17:43:39.637142 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xngf8" event={"ID":"658180e9-bba5-41c6-91f7-cd059a63fd32","Type":"ContainerStarted","Data":"5e60689088c3c18e13c6b457a48844bef1eacdfcb351353b239acbcdd544c668"} Nov 21 17:43:40 crc kubenswrapper[4967]: I1121 17:43:40.653262 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xngf8" event={"ID":"658180e9-bba5-41c6-91f7-cd059a63fd32","Type":"ContainerStarted","Data":"60cd15065484450ec60d516bbf9e2c5cba7c667704316d96d265bceda3136bed"} Nov 21 17:43:42 crc kubenswrapper[4967]: I1121 17:43:42.695198 4967 generic.go:334] "Generic (PLEG): container finished" podID="658180e9-bba5-41c6-91f7-cd059a63fd32" containerID="60cd15065484450ec60d516bbf9e2c5cba7c667704316d96d265bceda3136bed" exitCode=0 Nov 21 17:43:42 crc kubenswrapper[4967]: I1121 17:43:42.695288 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xngf8" event={"ID":"658180e9-bba5-41c6-91f7-cd059a63fd32","Type":"ContainerDied","Data":"60cd15065484450ec60d516bbf9e2c5cba7c667704316d96d265bceda3136bed"} Nov 21 17:43:43 crc kubenswrapper[4967]: I1121 17:43:43.711680 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xngf8" event={"ID":"658180e9-bba5-41c6-91f7-cd059a63fd32","Type":"ContainerStarted","Data":"68a8ef11e2b97aa5211f14f9b7d4980697a7628eab417c110b02d8ce14d6a26d"} Nov 21 17:43:43 crc kubenswrapper[4967]: I1121 17:43:43.743343 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xngf8" podStartSLOduration=3.291086652 podStartE2EDuration="6.743293221s" podCreationTimestamp="2025-11-21 17:43:37 +0000 UTC" firstStartedPulling="2025-11-21 17:43:39.65770428 +0000 UTC m=+7707.916225288" lastFinishedPulling="2025-11-21 17:43:43.109910839 +0000 UTC m=+7711.368431857" observedRunningTime="2025-11-21 17:43:43.734759867 +0000 UTC m=+7711.993280885" watchObservedRunningTime="2025-11-21 17:43:43.743293221 +0000 UTC m=+7712.001814239" Nov 21 17:43:48 crc kubenswrapper[4967]: I1121 17:43:48.275484 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/community-operators-xngf8" Nov 21 17:43:48 crc kubenswrapper[4967]: I1121 17:43:48.276705 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xngf8" Nov 21 17:43:48 crc kubenswrapper[4967]: I1121 17:43:48.352894 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xngf8" Nov 21 17:43:48 crc kubenswrapper[4967]: I1121 17:43:48.890287 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xngf8" Nov 21 17:43:51 crc kubenswrapper[4967]: I1121 17:43:51.878559 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xngf8"] Nov 21 17:43:51 crc kubenswrapper[4967]: I1121 17:43:51.879977 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xngf8" podUID="658180e9-bba5-41c6-91f7-cd059a63fd32" containerName="registry-server" containerID="cri-o://68a8ef11e2b97aa5211f14f9b7d4980697a7628eab417c110b02d8ce14d6a26d" gracePeriod=2 Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.657092 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xngf8" Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.724616 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/658180e9-bba5-41c6-91f7-cd059a63fd32-utilities\") pod \"658180e9-bba5-41c6-91f7-cd059a63fd32\" (UID: \"658180e9-bba5-41c6-91f7-cd059a63fd32\") " Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.724685 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r6l4v\" (UniqueName: \"kubernetes.io/projected/658180e9-bba5-41c6-91f7-cd059a63fd32-kube-api-access-r6l4v\") pod \"658180e9-bba5-41c6-91f7-cd059a63fd32\" (UID: \"658180e9-bba5-41c6-91f7-cd059a63fd32\") " Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.724760 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/658180e9-bba5-41c6-91f7-cd059a63fd32-catalog-content\") pod \"658180e9-bba5-41c6-91f7-cd059a63fd32\" (UID: \"658180e9-bba5-41c6-91f7-cd059a63fd32\") " Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.726063 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/658180e9-bba5-41c6-91f7-cd059a63fd32-utilities" (OuterVolumeSpecName: "utilities") pod "658180e9-bba5-41c6-91f7-cd059a63fd32" (UID: "658180e9-bba5-41c6-91f7-cd059a63fd32"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.734101 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/658180e9-bba5-41c6-91f7-cd059a63fd32-kube-api-access-r6l4v" (OuterVolumeSpecName: "kube-api-access-r6l4v") pod "658180e9-bba5-41c6-91f7-cd059a63fd32" (UID: "658180e9-bba5-41c6-91f7-cd059a63fd32"). InnerVolumeSpecName "kube-api-access-r6l4v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.798171 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/658180e9-bba5-41c6-91f7-cd059a63fd32-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "658180e9-bba5-41c6-91f7-cd059a63fd32" (UID: "658180e9-bba5-41c6-91f7-cd059a63fd32"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.828748 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r6l4v\" (UniqueName: \"kubernetes.io/projected/658180e9-bba5-41c6-91f7-cd059a63fd32-kube-api-access-r6l4v\") on node \"crc\" DevicePath \"\"" Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.828785 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/658180e9-bba5-41c6-91f7-cd059a63fd32-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.828798 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/658180e9-bba5-41c6-91f7-cd059a63fd32-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.844921 4967 generic.go:334] "Generic (PLEG): container finished" podID="658180e9-bba5-41c6-91f7-cd059a63fd32" containerID="68a8ef11e2b97aa5211f14f9b7d4980697a7628eab417c110b02d8ce14d6a26d" exitCode=0 Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.844976 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xngf8" event={"ID":"658180e9-bba5-41c6-91f7-cd059a63fd32","Type":"ContainerDied","Data":"68a8ef11e2b97aa5211f14f9b7d4980697a7628eab417c110b02d8ce14d6a26d"} Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.845013 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xngf8" event={"ID":"658180e9-bba5-41c6-91f7-cd059a63fd32","Type":"ContainerDied","Data":"5e60689088c3c18e13c6b457a48844bef1eacdfcb351353b239acbcdd544c668"} Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.845032 4967 scope.go:117] "RemoveContainer" containerID="68a8ef11e2b97aa5211f14f9b7d4980697a7628eab417c110b02d8ce14d6a26d" Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.845211 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xngf8" Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.883190 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xngf8"] Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.888284 4967 scope.go:117] "RemoveContainer" containerID="60cd15065484450ec60d516bbf9e2c5cba7c667704316d96d265bceda3136bed" Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.898524 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xngf8"] Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.933746 4967 scope.go:117] "RemoveContainer" containerID="6be9a46c5e0dc4546c6f067439501da799be5564b58309f479fdc3334927894c" Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.973979 4967 scope.go:117] "RemoveContainer" containerID="68a8ef11e2b97aa5211f14f9b7d4980697a7628eab417c110b02d8ce14d6a26d" Nov 21 17:43:52 crc kubenswrapper[4967]: E1121 17:43:52.974677 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68a8ef11e2b97aa5211f14f9b7d4980697a7628eab417c110b02d8ce14d6a26d\": container with ID starting with 68a8ef11e2b97aa5211f14f9b7d4980697a7628eab417c110b02d8ce14d6a26d not found: ID does not exist" containerID="68a8ef11e2b97aa5211f14f9b7d4980697a7628eab417c110b02d8ce14d6a26d" Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.974725 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68a8ef11e2b97aa5211f14f9b7d4980697a7628eab417c110b02d8ce14d6a26d"} err="failed to get container status \"68a8ef11e2b97aa5211f14f9b7d4980697a7628eab417c110b02d8ce14d6a26d\": rpc error: code = NotFound desc = could not find container \"68a8ef11e2b97aa5211f14f9b7d4980697a7628eab417c110b02d8ce14d6a26d\": container with ID starting with 68a8ef11e2b97aa5211f14f9b7d4980697a7628eab417c110b02d8ce14d6a26d not found: ID does not exist" Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.974753 4967 scope.go:117] "RemoveContainer" containerID="60cd15065484450ec60d516bbf9e2c5cba7c667704316d96d265bceda3136bed" Nov 21 17:43:52 crc kubenswrapper[4967]: E1121 17:43:52.975146 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60cd15065484450ec60d516bbf9e2c5cba7c667704316d96d265bceda3136bed\": container with ID starting with 60cd15065484450ec60d516bbf9e2c5cba7c667704316d96d265bceda3136bed not found: ID does not exist" containerID="60cd15065484450ec60d516bbf9e2c5cba7c667704316d96d265bceda3136bed" Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.975168 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60cd15065484450ec60d516bbf9e2c5cba7c667704316d96d265bceda3136bed"} err="failed to get container status \"60cd15065484450ec60d516bbf9e2c5cba7c667704316d96d265bceda3136bed\": rpc error: code = NotFound desc = could not find container \"60cd15065484450ec60d516bbf9e2c5cba7c667704316d96d265bceda3136bed\": container with ID starting with 60cd15065484450ec60d516bbf9e2c5cba7c667704316d96d265bceda3136bed not found: ID does not exist" Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.975182 4967 scope.go:117] "RemoveContainer" containerID="6be9a46c5e0dc4546c6f067439501da799be5564b58309f479fdc3334927894c" Nov 21 17:43:52 crc kubenswrapper[4967]: E1121 17:43:52.975682 4967 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"6be9a46c5e0dc4546c6f067439501da799be5564b58309f479fdc3334927894c\": container with ID starting with 6be9a46c5e0dc4546c6f067439501da799be5564b58309f479fdc3334927894c not found: ID does not exist" containerID="6be9a46c5e0dc4546c6f067439501da799be5564b58309f479fdc3334927894c" Nov 21 17:43:52 crc kubenswrapper[4967]: I1121 17:43:52.975699 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6be9a46c5e0dc4546c6f067439501da799be5564b58309f479fdc3334927894c"} err="failed to get container status \"6be9a46c5e0dc4546c6f067439501da799be5564b58309f479fdc3334927894c\": rpc error: code = NotFound desc = could not find container \"6be9a46c5e0dc4546c6f067439501da799be5564b58309f479fdc3334927894c\": container with ID starting with 6be9a46c5e0dc4546c6f067439501da799be5564b58309f479fdc3334927894c not found: ID does not exist" Nov 21 17:43:54 crc kubenswrapper[4967]: I1121 17:43:54.560643 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="658180e9-bba5-41c6-91f7-cd059a63fd32" path="/var/lib/kubelet/pods/658180e9-bba5-41c6-91f7-cd059a63fd32/volumes" Nov 21 17:45:00 crc kubenswrapper[4967]: I1121 17:45:00.166842 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm"] Nov 21 17:45:00 crc kubenswrapper[4967]: E1121 17:45:00.168304 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="658180e9-bba5-41c6-91f7-cd059a63fd32" containerName="extract-content" Nov 21 17:45:00 crc kubenswrapper[4967]: I1121 17:45:00.168347 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="658180e9-bba5-41c6-91f7-cd059a63fd32" containerName="extract-content" Nov 21 17:45:00 crc kubenswrapper[4967]: E1121 17:45:00.168387 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="658180e9-bba5-41c6-91f7-cd059a63fd32" containerName="extract-utilities" Nov 21 17:45:00 crc kubenswrapper[4967]: I1121 17:45:00.168397 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="658180e9-bba5-41c6-91f7-cd059a63fd32" containerName="extract-utilities" Nov 21 17:45:00 crc kubenswrapper[4967]: E1121 17:45:00.168456 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="658180e9-bba5-41c6-91f7-cd059a63fd32" containerName="registry-server" Nov 21 17:45:00 crc kubenswrapper[4967]: I1121 17:45:00.168468 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="658180e9-bba5-41c6-91f7-cd059a63fd32" containerName="registry-server" Nov 21 17:45:00 crc kubenswrapper[4967]: I1121 17:45:00.168780 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="658180e9-bba5-41c6-91f7-cd059a63fd32" containerName="registry-server" Nov 21 17:45:00 crc kubenswrapper[4967]: I1121 17:45:00.174496 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm" Nov 21 17:45:00 crc kubenswrapper[4967]: I1121 17:45:00.178417 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 17:45:00 crc kubenswrapper[4967]: I1121 17:45:00.179815 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 17:45:00 crc kubenswrapper[4967]: I1121 17:45:00.187608 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm"] Nov 21 17:45:00 crc kubenswrapper[4967]: I1121 17:45:00.215533 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fcc587e9-9fe6-4201-b71f-1badd9370f1c-secret-volume\") pod \"collect-profiles-29395785-w5wdm\" (UID: \"fcc587e9-9fe6-4201-b71f-1badd9370f1c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm" Nov 21 17:45:00 crc kubenswrapper[4967]: I1121 17:45:00.215639 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fcc587e9-9fe6-4201-b71f-1badd9370f1c-config-volume\") pod \"collect-profiles-29395785-w5wdm\" (UID: \"fcc587e9-9fe6-4201-b71f-1badd9370f1c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm" Nov 21 17:45:00 crc kubenswrapper[4967]: I1121 17:45:00.215800 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vht2\" (UniqueName: \"kubernetes.io/projected/fcc587e9-9fe6-4201-b71f-1badd9370f1c-kube-api-access-5vht2\") pod \"collect-profiles-29395785-w5wdm\" (UID: \"fcc587e9-9fe6-4201-b71f-1badd9370f1c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm" Nov 21 17:45:00 crc kubenswrapper[4967]: I1121 17:45:00.318764 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fcc587e9-9fe6-4201-b71f-1badd9370f1c-config-volume\") pod \"collect-profiles-29395785-w5wdm\" (UID: \"fcc587e9-9fe6-4201-b71f-1badd9370f1c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm" Nov 21 17:45:00 crc kubenswrapper[4967]: I1121 17:45:00.318925 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vht2\" (UniqueName: \"kubernetes.io/projected/fcc587e9-9fe6-4201-b71f-1badd9370f1c-kube-api-access-5vht2\") pod \"collect-profiles-29395785-w5wdm\" (UID: \"fcc587e9-9fe6-4201-b71f-1badd9370f1c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm" Nov 21 17:45:00 crc kubenswrapper[4967]: I1121 17:45:00.319122 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fcc587e9-9fe6-4201-b71f-1badd9370f1c-secret-volume\") pod \"collect-profiles-29395785-w5wdm\" (UID: \"fcc587e9-9fe6-4201-b71f-1badd9370f1c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm" Nov 21 17:45:00 crc kubenswrapper[4967]: I1121 17:45:00.319929 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fcc587e9-9fe6-4201-b71f-1badd9370f1c-config-volume\") pod 
\"collect-profiles-29395785-w5wdm\" (UID: \"fcc587e9-9fe6-4201-b71f-1badd9370f1c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm" Nov 21 17:45:00 crc kubenswrapper[4967]: I1121 17:45:00.325625 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fcc587e9-9fe6-4201-b71f-1badd9370f1c-secret-volume\") pod \"collect-profiles-29395785-w5wdm\" (UID: \"fcc587e9-9fe6-4201-b71f-1badd9370f1c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm" Nov 21 17:45:00 crc kubenswrapper[4967]: I1121 17:45:00.344988 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vht2\" (UniqueName: \"kubernetes.io/projected/fcc587e9-9fe6-4201-b71f-1badd9370f1c-kube-api-access-5vht2\") pod \"collect-profiles-29395785-w5wdm\" (UID: \"fcc587e9-9fe6-4201-b71f-1badd9370f1c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm" Nov 21 17:45:00 crc kubenswrapper[4967]: I1121 17:45:00.509592 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm" Nov 21 17:45:01 crc kubenswrapper[4967]: I1121 17:45:01.127301 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm"] Nov 21 17:45:01 crc kubenswrapper[4967]: I1121 17:45:01.900446 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm" event={"ID":"fcc587e9-9fe6-4201-b71f-1badd9370f1c","Type":"ContainerStarted","Data":"a1e99c9033e1e1dfe28d80814c690ccdeca431a94358f45aa71fedbe28b68ae2"} Nov 21 17:45:01 crc kubenswrapper[4967]: I1121 17:45:01.900900 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm" event={"ID":"fcc587e9-9fe6-4201-b71f-1badd9370f1c","Type":"ContainerStarted","Data":"39f37f8fc4f8a8ab7b36329900871e847d4842be5540dea74b3d4ae4954f170c"} Nov 21 17:45:01 crc kubenswrapper[4967]: I1121 17:45:01.938130 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm" podStartSLOduration=1.938103358 podStartE2EDuration="1.938103358s" podCreationTimestamp="2025-11-21 17:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 17:45:01.917877808 +0000 UTC m=+7790.176398846" watchObservedRunningTime="2025-11-21 17:45:01.938103358 +0000 UTC m=+7790.196624366" Nov 21 17:45:02 crc kubenswrapper[4967]: I1121 17:45:02.916534 4967 generic.go:334] "Generic (PLEG): container finished" podID="fcc587e9-9fe6-4201-b71f-1badd9370f1c" containerID="a1e99c9033e1e1dfe28d80814c690ccdeca431a94358f45aa71fedbe28b68ae2" exitCode=0 Nov 21 17:45:02 crc kubenswrapper[4967]: I1121 17:45:02.916616 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm" event={"ID":"fcc587e9-9fe6-4201-b71f-1badd9370f1c","Type":"ContainerDied","Data":"a1e99c9033e1e1dfe28d80814c690ccdeca431a94358f45aa71fedbe28b68ae2"} Nov 21 17:45:04 crc kubenswrapper[4967]: I1121 17:45:04.442955 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm" Nov 21 17:45:04 crc kubenswrapper[4967]: I1121 17:45:04.626639 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fcc587e9-9fe6-4201-b71f-1badd9370f1c-config-volume\") pod \"fcc587e9-9fe6-4201-b71f-1badd9370f1c\" (UID: \"fcc587e9-9fe6-4201-b71f-1badd9370f1c\") " Nov 21 17:45:04 crc kubenswrapper[4967]: I1121 17:45:04.627272 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fcc587e9-9fe6-4201-b71f-1badd9370f1c-secret-volume\") pod \"fcc587e9-9fe6-4201-b71f-1badd9370f1c\" (UID: \"fcc587e9-9fe6-4201-b71f-1badd9370f1c\") " Nov 21 17:45:04 crc kubenswrapper[4967]: I1121 17:45:04.627757 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fcc587e9-9fe6-4201-b71f-1badd9370f1c-config-volume" (OuterVolumeSpecName: "config-volume") pod "fcc587e9-9fe6-4201-b71f-1badd9370f1c" (UID: "fcc587e9-9fe6-4201-b71f-1badd9370f1c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 17:45:04 crc kubenswrapper[4967]: I1121 17:45:04.629303 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5vht2\" (UniqueName: \"kubernetes.io/projected/fcc587e9-9fe6-4201-b71f-1badd9370f1c-kube-api-access-5vht2\") pod \"fcc587e9-9fe6-4201-b71f-1badd9370f1c\" (UID: \"fcc587e9-9fe6-4201-b71f-1badd9370f1c\") " Nov 21 17:45:04 crc kubenswrapper[4967]: I1121 17:45:04.631466 4967 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fcc587e9-9fe6-4201-b71f-1badd9370f1c-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 17:45:04 crc kubenswrapper[4967]: I1121 17:45:04.637743 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fcc587e9-9fe6-4201-b71f-1badd9370f1c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "fcc587e9-9fe6-4201-b71f-1badd9370f1c" (UID: "fcc587e9-9fe6-4201-b71f-1badd9370f1c"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 17:45:04 crc kubenswrapper[4967]: I1121 17:45:04.648050 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fcc587e9-9fe6-4201-b71f-1badd9370f1c-kube-api-access-5vht2" (OuterVolumeSpecName: "kube-api-access-5vht2") pod "fcc587e9-9fe6-4201-b71f-1badd9370f1c" (UID: "fcc587e9-9fe6-4201-b71f-1badd9370f1c"). InnerVolumeSpecName "kube-api-access-5vht2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:45:04 crc kubenswrapper[4967]: I1121 17:45:04.734544 4967 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fcc587e9-9fe6-4201-b71f-1badd9370f1c-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 17:45:04 crc kubenswrapper[4967]: I1121 17:45:04.734597 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5vht2\" (UniqueName: \"kubernetes.io/projected/fcc587e9-9fe6-4201-b71f-1badd9370f1c-kube-api-access-5vht2\") on node \"crc\" DevicePath \"\"" Nov 21 17:45:04 crc kubenswrapper[4967]: I1121 17:45:04.952225 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm" event={"ID":"fcc587e9-9fe6-4201-b71f-1badd9370f1c","Type":"ContainerDied","Data":"39f37f8fc4f8a8ab7b36329900871e847d4842be5540dea74b3d4ae4954f170c"} Nov 21 17:45:04 crc kubenswrapper[4967]: I1121 17:45:04.952279 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39f37f8fc4f8a8ab7b36329900871e847d4842be5540dea74b3d4ae4954f170c" Nov 21 17:45:04 crc kubenswrapper[4967]: I1121 17:45:04.952285 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395785-w5wdm" Nov 21 17:45:05 crc kubenswrapper[4967]: I1121 17:45:05.020749 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq"] Nov 21 17:45:05 crc kubenswrapper[4967]: I1121 17:45:05.035620 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395740-mcstq"] Nov 21 17:45:06 crc kubenswrapper[4967]: I1121 17:45:06.553899 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f288f452-99e9-4291-8f01-737ffb7610bd" path="/var/lib/kubelet/pods/f288f452-99e9-4291-8f01-737ffb7610bd/volumes" Nov 21 17:45:16 crc kubenswrapper[4967]: I1121 17:45:16.522234 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:45:16 crc kubenswrapper[4967]: I1121 17:45:16.523502 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:45:46 crc kubenswrapper[4967]: I1121 17:45:46.469436 4967 scope.go:117] "RemoveContainer" containerID="0dce33fc76bce54d70e276140e9f003c1001bbe3e3857b80e9f3eef4d4a1fe9f" Nov 21 17:45:46 crc kubenswrapper[4967]: I1121 17:45:46.523555 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:45:46 crc kubenswrapper[4967]: I1121 17:45:46.523654 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:46:16 crc kubenswrapper[4967]: I1121 17:46:16.522194 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:46:16 crc kubenswrapper[4967]: I1121 17:46:16.523014 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:46:16 crc kubenswrapper[4967]: I1121 17:46:16.523093 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 17:46:16 crc kubenswrapper[4967]: I1121 17:46:16.525040 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 17:46:16 crc kubenswrapper[4967]: I1121 17:46:16.525122 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324" gracePeriod=600 Nov 21 17:46:16 crc kubenswrapper[4967]: E1121 17:46:16.686599 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:46:16 crc kubenswrapper[4967]: I1121 17:46:16.940099 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324" exitCode=0 Nov 21 17:46:16 crc kubenswrapper[4967]: I1121 17:46:16.940183 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"} Nov 21 17:46:16 crc kubenswrapper[4967]: I1121 17:46:16.940275 4967 scope.go:117] "RemoveContainer" containerID="67a21db30f97c1a97d8586d35df714af60a429d9fe77042ad42a28e73c63ab91" Nov 21 17:46:16 crc kubenswrapper[4967]: I1121 17:46:16.941905 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324" Nov 21 17:46:16 crc kubenswrapper[4967]: E1121 17:46:16.942677 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:46:30 crc kubenswrapper[4967]: I1121 17:46:30.536802 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324" Nov 21 17:46:30 crc kubenswrapper[4967]: E1121 17:46:30.537663 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:46:41 crc kubenswrapper[4967]: I1121 17:46:41.536465 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324" Nov 21 17:46:41 crc kubenswrapper[4967]: E1121 17:46:41.537291 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:46:53 crc kubenswrapper[4967]: I1121 17:46:53.541600 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324" Nov 21 17:46:53 crc kubenswrapper[4967]: E1121 17:46:53.543029 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:47:04 crc kubenswrapper[4967]: I1121 17:47:04.537173 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324" Nov 21 17:47:04 crc kubenswrapper[4967]: E1121 17:47:04.538519 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:47:19 crc kubenswrapper[4967]: I1121 17:47:19.537709 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324" Nov 21 17:47:19 crc kubenswrapper[4967]: E1121 17:47:19.538579 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:47:30 crc kubenswrapper[4967]: I1121 17:47:30.539559 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"
Nov 21 17:47:30 crc kubenswrapper[4967]: E1121 17:47:30.540920 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:47:42 crc kubenswrapper[4967]: I1121 17:47:42.546763 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"
Nov 21 17:47:42 crc kubenswrapper[4967]: E1121 17:47:42.548556 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:47:53 crc kubenswrapper[4967]: I1121 17:47:53.537042 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"
Nov 21 17:47:53 crc kubenswrapper[4967]: E1121 17:47:53.538017 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:48:08 crc kubenswrapper[4967]: I1121 17:48:08.540127 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"
Nov 21 17:48:08 crc kubenswrapper[4967]: E1121 17:48:08.541637 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:48:23 crc kubenswrapper[4967]: I1121 17:48:23.537546 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"
Nov 21 17:48:23 crc kubenswrapper[4967]: E1121 17:48:23.538483 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:48:36 crc kubenswrapper[4967]: I1121 17:48:36.538986 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"
Nov 21 17:48:36 crc kubenswrapper[4967]: E1121 17:48:36.544166 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:48:49 crc kubenswrapper[4967]: I1121 17:48:49.537186 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"
Nov 21 17:48:49 crc kubenswrapper[4967]: E1121 17:48:49.538314 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:49:04 crc kubenswrapper[4967]: I1121 17:49:04.537402 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"
Nov 21 17:49:04 crc kubenswrapper[4967]: E1121 17:49:04.538059 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:49:16 crc kubenswrapper[4967]: I1121 17:49:16.538703 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"
Nov 21 17:49:16 crc kubenswrapper[4967]: E1121 17:49:16.547169 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:49:31 crc kubenswrapper[4967]: I1121 17:49:31.537016 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"
Nov 21 17:49:31 crc kubenswrapper[4967]: E1121 17:49:31.537667 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:49:44 crc kubenswrapper[4967]: I1121 17:49:44.538209 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"
Nov 21 17:49:44 crc kubenswrapper[4967]: E1121 17:49:44.539919 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:49:48 crc kubenswrapper[4967]: I1121 17:49:48.576638 4967 generic.go:334] "Generic (PLEG): container finished" podID="e74fdffd-f5c7-4be6-8d37-5d9e07704aaa" containerID="84ec716b35ca4d68dd68f0435cee2e226fa6b64f77d91d801231aaa05f5e7e2c" exitCode=1
Nov 21 17:49:48 crc kubenswrapper[4967]: I1121 17:49:48.576955 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa","Type":"ContainerDied","Data":"84ec716b35ca4d68dd68f0435cee2e226fa6b64f77d91d801231aaa05f5e7e2c"}
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.141750 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.241061 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-test-operator-ephemeral-temporary\") pod \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") "
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.241159 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") "
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.241422 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-openstack-config\") pod \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") "
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.241498 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-test-operator-ephemeral-workdir\") pod \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") "
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.241701 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-ca-certs\") pod \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") "
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.241758 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-config-data\") pod \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") "
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.241919 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-ssh-key\") pod \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") "
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.241996 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-openstack-config-secret\") pod \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") "
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.242080 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7lhzb\" (UniqueName: \"kubernetes.io/projected/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-kube-api-access-7lhzb\") pod \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\" (UID: \"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa\") "
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.244089 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-config-data" (OuterVolumeSpecName: "config-data") pod "e74fdffd-f5c7-4be6-8d37-5d9e07704aaa" (UID: "e74fdffd-f5c7-4be6-8d37-5d9e07704aaa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.254812 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "e74fdffd-f5c7-4be6-8d37-5d9e07704aaa" (UID: "e74fdffd-f5c7-4be6-8d37-5d9e07704aaa"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.260694 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-kube-api-access-7lhzb" (OuterVolumeSpecName: "kube-api-access-7lhzb") pod "e74fdffd-f5c7-4be6-8d37-5d9e07704aaa" (UID: "e74fdffd-f5c7-4be6-8d37-5d9e07704aaa"). InnerVolumeSpecName "kube-api-access-7lhzb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.262379 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "e74fdffd-f5c7-4be6-8d37-5d9e07704aaa" (UID: "e74fdffd-f5c7-4be6-8d37-5d9e07704aaa"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.286006 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "test-operator-logs") pod "e74fdffd-f5c7-4be6-8d37-5d9e07704aaa" (UID: "e74fdffd-f5c7-4be6-8d37-5d9e07704aaa"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.304911 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "e74fdffd-f5c7-4be6-8d37-5d9e07704aaa" (UID: "e74fdffd-f5c7-4be6-8d37-5d9e07704aaa"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.307729 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e74fdffd-f5c7-4be6-8d37-5d9e07704aaa" (UID: "e74fdffd-f5c7-4be6-8d37-5d9e07704aaa"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.316505 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "e74fdffd-f5c7-4be6-8d37-5d9e07704aaa" (UID: "e74fdffd-f5c7-4be6-8d37-5d9e07704aaa"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.345465 4967 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\""
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.372610 4967 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" "
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.372642 4967 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\""
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.372657 4967 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-ca-certs\") on node \"crc\" DevicePath \"\""
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.372669 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-config-data\") on node \"crc\" DevicePath \"\""
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.372679 4967 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.372691 4967 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.372706 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7lhzb\" (UniqueName: \"kubernetes.io/projected/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-kube-api-access-7lhzb\") on node \"crc\" DevicePath \"\""
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.404822 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "e74fdffd-f5c7-4be6-8d37-5d9e07704aaa" (UID: "e74fdffd-f5c7-4be6-8d37-5d9e07704aaa"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.429598 4967 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc"
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.474938 4967 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e74fdffd-f5c7-4be6-8d37-5d9e07704aaa-openstack-config\") on node \"crc\" DevicePath \"\""
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.475164 4967 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\""
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.604659 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"e74fdffd-f5c7-4be6-8d37-5d9e07704aaa","Type":"ContainerDied","Data":"7316c789a90ba2142d5b83b145351b50f85170944668732493fe8107710b49b7"}
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.604713 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7316c789a90ba2142d5b83b145351b50f85170944668732493fe8107710b49b7"
Nov 21 17:49:50 crc kubenswrapper[4967]: I1121 17:49:50.604799 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 21 17:49:57 crc kubenswrapper[4967]: I1121 17:49:57.543025 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"
Nov 21 17:49:57 crc kubenswrapper[4967]: E1121 17:49:57.546085 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:49:59 crc kubenswrapper[4967]: I1121 17:49:59.807821 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 21 17:49:59 crc kubenswrapper[4967]: E1121 17:49:59.810199 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fcc587e9-9fe6-4201-b71f-1badd9370f1c" containerName="collect-profiles"
Nov 21 17:49:59 crc kubenswrapper[4967]: I1121 17:49:59.810299 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="fcc587e9-9fe6-4201-b71f-1badd9370f1c" containerName="collect-profiles"
Nov 21 17:49:59 crc kubenswrapper[4967]: E1121 17:49:59.810557 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e74fdffd-f5c7-4be6-8d37-5d9e07704aaa" containerName="tempest-tests-tempest-tests-runner"
Nov 21 17:49:59 crc kubenswrapper[4967]: I1121 17:49:59.810661 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="e74fdffd-f5c7-4be6-8d37-5d9e07704aaa" containerName="tempest-tests-tempest-tests-runner"
Nov 21 17:49:59 crc kubenswrapper[4967]: I1121 17:49:59.811047 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="e74fdffd-f5c7-4be6-8d37-5d9e07704aaa" containerName="tempest-tests-tempest-tests-runner"
Nov 21 17:49:59 crc kubenswrapper[4967]: I1121 17:49:59.811166 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="fcc587e9-9fe6-4201-b71f-1badd9370f1c" containerName="collect-profiles"
Nov 21 17:49:59 crc kubenswrapper[4967]: I1121 17:49:59.812500 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 21 17:49:59 crc kubenswrapper[4967]: I1121 17:49:59.816409 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-nc5kp"
Nov 21 17:49:59 crc kubenswrapper[4967]: I1121 17:49:59.823489 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 21 17:49:59 crc kubenswrapper[4967]: I1121 17:49:59.879899 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2bacaf27-62d6-464e-84ae-0f8e30cf0147\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 21 17:49:59 crc kubenswrapper[4967]: I1121 17:49:59.880036 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9ng8\" (UniqueName: \"kubernetes.io/projected/2bacaf27-62d6-464e-84ae-0f8e30cf0147-kube-api-access-d9ng8\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2bacaf27-62d6-464e-84ae-0f8e30cf0147\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 21 17:49:59 crc kubenswrapper[4967]: I1121 17:49:59.983092 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2bacaf27-62d6-464e-84ae-0f8e30cf0147\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 21 17:49:59 crc kubenswrapper[4967]: I1121 17:49:59.983272 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9ng8\" (UniqueName: \"kubernetes.io/projected/2bacaf27-62d6-464e-84ae-0f8e30cf0147-kube-api-access-d9ng8\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2bacaf27-62d6-464e-84ae-0f8e30cf0147\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 21 17:49:59 crc kubenswrapper[4967]: I1121 17:49:59.985410 4967 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2bacaf27-62d6-464e-84ae-0f8e30cf0147\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 21 17:50:00 crc kubenswrapper[4967]: I1121 17:50:00.013024 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9ng8\" (UniqueName: \"kubernetes.io/projected/2bacaf27-62d6-464e-84ae-0f8e30cf0147-kube-api-access-d9ng8\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2bacaf27-62d6-464e-84ae-0f8e30cf0147\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 21 17:50:00 crc kubenswrapper[4967]: I1121 17:50:00.018472 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2bacaf27-62d6-464e-84ae-0f8e30cf0147\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 21 17:50:00 crc kubenswrapper[4967]: I1121 17:50:00.148601 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 21 17:50:00 crc kubenswrapper[4967]: I1121 17:50:00.716934 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 21 17:50:00 crc kubenswrapper[4967]: I1121 17:50:00.739700 4967 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 21 17:50:00 crc kubenswrapper[4967]: I1121 17:50:00.788386 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"2bacaf27-62d6-464e-84ae-0f8e30cf0147","Type":"ContainerStarted","Data":"bd954df28db10dfdef6f076003e3c7d11a3ea50e945e67b35671d44885b07e70"}
Nov 21 17:50:04 crc kubenswrapper[4967]: I1121 17:50:04.863418 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"2bacaf27-62d6-464e-84ae-0f8e30cf0147","Type":"ContainerStarted","Data":"f45f5d2853c495bf1553e6724192c3ce7769c81058c7b366f3b2c1d71c43e7fd"}
Nov 21 17:50:04 crc kubenswrapper[4967]: I1121 17:50:04.883677 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.640773385 podStartE2EDuration="5.883656757s" podCreationTimestamp="2025-11-21 17:49:59 +0000 UTC" firstStartedPulling="2025-11-21 17:50:00.739343326 +0000 UTC m=+8088.997864334" lastFinishedPulling="2025-11-21 17:50:03.982226687 +0000 UTC m=+8092.240747706" observedRunningTime="2025-11-21 17:50:04.881511256 +0000 UTC m=+8093.140032294" watchObservedRunningTime="2025-11-21 17:50:04.883656757 +0000 UTC m=+8093.142177765"
Nov 21 17:50:11 crc kubenswrapper[4967]: I1121 17:50:11.538225 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"
Nov 21 17:50:11 crc kubenswrapper[4967]: E1121 17:50:11.539897 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:50:24 crc kubenswrapper[4967]: I1121 17:50:24.539593 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"
Nov 21 17:50:24 crc kubenswrapper[4967]: E1121 17:50:24.540703 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:50:39 crc kubenswrapper[4967]: I1121 17:50:39.537988 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"
Nov 21 17:50:39 crc kubenswrapper[4967]: E1121 17:50:39.539371 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:50:48 crc kubenswrapper[4967]: I1121 17:50:48.283525 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-97nf5/must-gather-lmdw5"]
Nov 21 17:50:48 crc kubenswrapper[4967]: I1121 17:50:48.286877 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-97nf5/must-gather-lmdw5"
Nov 21 17:50:48 crc kubenswrapper[4967]: I1121 17:50:48.288972 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-97nf5"/"default-dockercfg-zbh4d"
Nov 21 17:50:48 crc kubenswrapper[4967]: I1121 17:50:48.292637 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-97nf5"/"openshift-service-ca.crt"
Nov 21 17:50:48 crc kubenswrapper[4967]: I1121 17:50:48.293344 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-97nf5"/"kube-root-ca.crt"
Nov 21 17:50:48 crc kubenswrapper[4967]: I1121 17:50:48.296864 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-97nf5/must-gather-lmdw5"]
Nov 21 17:50:48 crc kubenswrapper[4967]: I1121 17:50:48.474883 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/d9f8b255-4914-4511-8534-2814bc1c1181-must-gather-output\") pod \"must-gather-lmdw5\" (UID: \"d9f8b255-4914-4511-8534-2814bc1c1181\") " pod="openshift-must-gather-97nf5/must-gather-lmdw5"
Nov 21 17:50:48 crc kubenswrapper[4967]: I1121 17:50:48.475499 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lv8g2\" (UniqueName: \"kubernetes.io/projected/d9f8b255-4914-4511-8534-2814bc1c1181-kube-api-access-lv8g2\") pod \"must-gather-lmdw5\" (UID: \"d9f8b255-4914-4511-8534-2814bc1c1181\") " pod="openshift-must-gather-97nf5/must-gather-lmdw5"
Nov 21 17:50:48 crc kubenswrapper[4967]: I1121 17:50:48.579114 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/d9f8b255-4914-4511-8534-2814bc1c1181-must-gather-output\") pod \"must-gather-lmdw5\" (UID: \"d9f8b255-4914-4511-8534-2814bc1c1181\") " pod="openshift-must-gather-97nf5/must-gather-lmdw5"
Nov 21 17:50:48 crc kubenswrapper[4967]: I1121 17:50:48.579329 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lv8g2\" (UniqueName: \"kubernetes.io/projected/d9f8b255-4914-4511-8534-2814bc1c1181-kube-api-access-lv8g2\") pod \"must-gather-lmdw5\" (UID: \"d9f8b255-4914-4511-8534-2814bc1c1181\") " pod="openshift-must-gather-97nf5/must-gather-lmdw5"
Nov 21 17:50:48 crc kubenswrapper[4967]: I1121 17:50:48.580485 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/d9f8b255-4914-4511-8534-2814bc1c1181-must-gather-output\") pod \"must-gather-lmdw5\" (UID: \"d9f8b255-4914-4511-8534-2814bc1c1181\") " pod="openshift-must-gather-97nf5/must-gather-lmdw5"
Nov 21 17:50:48 crc kubenswrapper[4967]: I1121 17:50:48.607128 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lv8g2\" (UniqueName: \"kubernetes.io/projected/d9f8b255-4914-4511-8534-2814bc1c1181-kube-api-access-lv8g2\") pod \"must-gather-lmdw5\" (UID: \"d9f8b255-4914-4511-8534-2814bc1c1181\") " pod="openshift-must-gather-97nf5/must-gather-lmdw5"
Nov 21 17:50:48 crc kubenswrapper[4967]: I1121 17:50:48.631001 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-97nf5/must-gather-lmdw5"
Nov 21 17:50:49 crc kubenswrapper[4967]: I1121 17:50:49.414455 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-97nf5/must-gather-lmdw5"]
Nov 21 17:50:49 crc kubenswrapper[4967]: I1121 17:50:49.514676 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-97nf5/must-gather-lmdw5" event={"ID":"d9f8b255-4914-4511-8534-2814bc1c1181","Type":"ContainerStarted","Data":"75fc7bbd03dbcf04a632c2a796b322c67ec894cbf8bb4aa53e1449dae59d14cc"}
Nov 21 17:50:50 crc kubenswrapper[4967]: I1121 17:50:50.538457 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"
Nov 21 17:50:50 crc kubenswrapper[4967]: E1121 17:50:50.539638 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:51:01 crc kubenswrapper[4967]: I1121 17:51:01.539493 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"
Nov 21 17:51:01 crc kubenswrapper[4967]: E1121 17:51:01.540615 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:51:02 crc kubenswrapper[4967]: I1121 17:51:02.767107 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-97nf5/must-gather-lmdw5" event={"ID":"d9f8b255-4914-4511-8534-2814bc1c1181","Type":"ContainerStarted","Data":"bafa52768d982ffd5caa061e21e16e7132e166abe3f9175fe429f845465e68a8"}
Nov 21 17:51:02 crc kubenswrapper[4967]: I1121 17:51:02.768500 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-97nf5/must-gather-lmdw5" event={"ID":"d9f8b255-4914-4511-8534-2814bc1c1181","Type":"ContainerStarted","Data":"babbe72c705a64d8422c3d9fbd0246f24c7a47e407478bad9667048c7a98ff20"}
Nov 21 17:51:02 crc kubenswrapper[4967]: I1121 17:51:02.789103 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-97nf5/must-gather-lmdw5" podStartSLOduration=2.640089375 podStartE2EDuration="14.789083239s" podCreationTimestamp="2025-11-21 17:50:48 +0000 UTC" firstStartedPulling="2025-11-21 17:50:49.419574719 +0000 UTC m=+8137.678095727" lastFinishedPulling="2025-11-21 17:51:01.568568563 +0000 UTC m=+8149.827089591" observedRunningTime="2025-11-21 17:51:02.788688197 +0000 UTC m=+8151.047209205" watchObservedRunningTime="2025-11-21 17:51:02.789083239 +0000 UTC m=+8151.047604237"
Nov 21 17:51:13 crc kubenswrapper[4967]: I1121 17:51:13.275302 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-97nf5/crc-debug-vkr7h"]
Nov 21 17:51:13 crc kubenswrapper[4967]: I1121 17:51:13.278383 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-97nf5/crc-debug-vkr7h"
Nov 21 17:51:13 crc kubenswrapper[4967]: I1121 17:51:13.337202 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/066395b5-1820-4f1c-a7ad-6956302d10b1-host\") pod \"crc-debug-vkr7h\" (UID: \"066395b5-1820-4f1c-a7ad-6956302d10b1\") " pod="openshift-must-gather-97nf5/crc-debug-vkr7h"
Nov 21 17:51:13 crc kubenswrapper[4967]: I1121 17:51:13.337909 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztsdz\" (UniqueName: \"kubernetes.io/projected/066395b5-1820-4f1c-a7ad-6956302d10b1-kube-api-access-ztsdz\") pod \"crc-debug-vkr7h\" (UID: \"066395b5-1820-4f1c-a7ad-6956302d10b1\") " pod="openshift-must-gather-97nf5/crc-debug-vkr7h"
Nov 21 17:51:13 crc kubenswrapper[4967]: I1121 17:51:13.442165 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/066395b5-1820-4f1c-a7ad-6956302d10b1-host\") pod \"crc-debug-vkr7h\" (UID: \"066395b5-1820-4f1c-a7ad-6956302d10b1\") " pod="openshift-must-gather-97nf5/crc-debug-vkr7h"
Nov 21 17:51:13 crc kubenswrapper[4967]: I1121 17:51:13.442276 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztsdz\" (UniqueName: \"kubernetes.io/projected/066395b5-1820-4f1c-a7ad-6956302d10b1-kube-api-access-ztsdz\") pod \"crc-debug-vkr7h\" (UID: \"066395b5-1820-4f1c-a7ad-6956302d10b1\") " pod="openshift-must-gather-97nf5/crc-debug-vkr7h"
Nov 21 17:51:13 crc kubenswrapper[4967]: I1121 17:51:13.442371 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/066395b5-1820-4f1c-a7ad-6956302d10b1-host\") pod \"crc-debug-vkr7h\" (UID: \"066395b5-1820-4f1c-a7ad-6956302d10b1\") " pod="openshift-must-gather-97nf5/crc-debug-vkr7h"
Nov 21 17:51:13 crc kubenswrapper[4967]: I1121 17:51:13.487224 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztsdz\" (UniqueName: \"kubernetes.io/projected/066395b5-1820-4f1c-a7ad-6956302d10b1-kube-api-access-ztsdz\") pod \"crc-debug-vkr7h\" (UID: \"066395b5-1820-4f1c-a7ad-6956302d10b1\") " pod="openshift-must-gather-97nf5/crc-debug-vkr7h"
Nov 21 17:51:13 crc kubenswrapper[4967]: I1121 17:51:13.602908 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-97nf5/crc-debug-vkr7h"
Nov 21 17:51:13 crc kubenswrapper[4967]: I1121 17:51:13.981606 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-97nf5/crc-debug-vkr7h" event={"ID":"066395b5-1820-4f1c-a7ad-6956302d10b1","Type":"ContainerStarted","Data":"9ce9e74942290bd40d9a2b495c718f6a90d89c082de4a06ca4fe684247e8d358"}
Nov 21 17:51:14 crc kubenswrapper[4967]: I1121 17:51:14.537207 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"
Nov 21 17:51:14 crc kubenswrapper[4967]: E1121 17:51:14.537603 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0"
Nov 21 17:51:25 crc kubenswrapper[4967]: I1121 17:51:25.537051 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324"
Nov 21 17:51:28 crc kubenswrapper[4967]: I1121 17:51:28.207145 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"7f557661ba6f6037ba393d7f4791869dbf4276d289589af753d8dc1537d3b2c4"}
Nov 21 17:51:28 crc kubenswrapper[4967]: I1121 17:51:28.220448 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-97nf5/crc-debug-vkr7h" event={"ID":"066395b5-1820-4f1c-a7ad-6956302d10b1","Type":"ContainerStarted","Data":"8e4273ce9f73ff5b303c4a026031340afe26b1db282415664ba9d1f91d6d194c"}
Nov 21 17:51:28 crc kubenswrapper[4967]: I1121 17:51:28.317730 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-97nf5/crc-debug-vkr7h" podStartSLOduration=1.561178883 podStartE2EDuration="15.317696679s" podCreationTimestamp="2025-11-21 17:51:13 +0000 UTC" firstStartedPulling="2025-11-21 17:51:13.718578116 +0000 UTC m=+8161.977099124" lastFinishedPulling="2025-11-21 17:51:27.475095912 +0000 UTC m=+8175.733616920" observedRunningTime="2025-11-21 17:51:28.280764733 +0000 UTC m=+8176.539285741" watchObservedRunningTime="2025-11-21 17:51:28.317696679 +0000 UTC m=+8176.576217687"
Nov 21 17:51:37 crc kubenswrapper[4967]: I1121 17:51:37.998758 4967 trace.go:236] Trace[2018488067]: "Calculate volume metrics of storage for pod openshift-logging/logging-loki-compactor-0" (21-Nov-2025 17:51:36.891) (total time: 1106ms):
Nov 21 17:51:37 crc kubenswrapper[4967]: Trace[2018488067]: [1.106835334s] [1.106835334s] END
Nov 21 17:52:05 crc kubenswrapper[4967]: I1121 17:52:05.297383 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xv8r7"]
Nov 21 17:52:05 crc kubenswrapper[4967]: I1121 17:52:05.303440 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xv8r7"
Nov 21 17:52:05 crc kubenswrapper[4967]: I1121 17:52:05.389911 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xv8r7"]
Nov 21 17:52:05 crc kubenswrapper[4967]: I1121 17:52:05.390083 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tcxkw\" (UniqueName: \"kubernetes.io/projected/81d2adbd-2cb1-4cea-bf8a-84cab688ec22-kube-api-access-tcxkw\") pod \"redhat-marketplace-xv8r7\" (UID: \"81d2adbd-2cb1-4cea-bf8a-84cab688ec22\") " pod="openshift-marketplace/redhat-marketplace-xv8r7"
Nov 21 17:52:05 crc kubenswrapper[4967]: I1121 17:52:05.390625 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81d2adbd-2cb1-4cea-bf8a-84cab688ec22-catalog-content\") pod \"redhat-marketplace-xv8r7\" (UID: \"81d2adbd-2cb1-4cea-bf8a-84cab688ec22\") " pod="openshift-marketplace/redhat-marketplace-xv8r7"
Nov 21 17:52:05 crc kubenswrapper[4967]: I1121 17:52:05.390745 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81d2adbd-2cb1-4cea-bf8a-84cab688ec22-utilities\") pod \"redhat-marketplace-xv8r7\" (UID: \"81d2adbd-2cb1-4cea-bf8a-84cab688ec22\") " pod="openshift-marketplace/redhat-marketplace-xv8r7"
Nov 21 17:52:05 crc kubenswrapper[4967]: I1121 17:52:05.494957 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tcxkw\" (UniqueName: \"kubernetes.io/projected/81d2adbd-2cb1-4cea-bf8a-84cab688ec22-kube-api-access-tcxkw\") pod \"redhat-marketplace-xv8r7\" (UID: \"81d2adbd-2cb1-4cea-bf8a-84cab688ec22\") " pod="openshift-marketplace/redhat-marketplace-xv8r7"
Nov 21 17:52:05 crc kubenswrapper[4967]: I1121 17:52:05.495138 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81d2adbd-2cb1-4cea-bf8a-84cab688ec22-catalog-content\") pod \"redhat-marketplace-xv8r7\" (UID: \"81d2adbd-2cb1-4cea-bf8a-84cab688ec22\") " pod="openshift-marketplace/redhat-marketplace-xv8r7"
Nov 21 17:52:05 crc kubenswrapper[4967]: I1121 17:52:05.495235 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81d2adbd-2cb1-4cea-bf8a-84cab688ec22-utilities\") pod \"redhat-marketplace-xv8r7\" (UID: \"81d2adbd-2cb1-4cea-bf8a-84cab688ec22\") " pod="openshift-marketplace/redhat-marketplace-xv8r7"
Nov 21 17:52:05 crc kubenswrapper[4967]: I1121 17:52:05.495911 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81d2adbd-2cb1-4cea-bf8a-84cab688ec22-catalog-content\") pod \"redhat-marketplace-xv8r7\" (UID: \"81d2adbd-2cb1-4cea-bf8a-84cab688ec22\") " pod="openshift-marketplace/redhat-marketplace-xv8r7"
Nov 21 17:52:05 crc kubenswrapper[4967]: I1121 17:52:05.495992 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81d2adbd-2cb1-4cea-bf8a-84cab688ec22-utilities\") pod \"redhat-marketplace-xv8r7\" (UID: \"81d2adbd-2cb1-4cea-bf8a-84cab688ec22\") " pod="openshift-marketplace/redhat-marketplace-xv8r7"
Nov 21 17:52:05 crc kubenswrapper[4967]: I1121 17:52:05.523341 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tcxkw\" (UniqueName: \"kubernetes.io/projected/81d2adbd-2cb1-4cea-bf8a-84cab688ec22-kube-api-access-tcxkw\") pod \"redhat-marketplace-xv8r7\" (UID: \"81d2adbd-2cb1-4cea-bf8a-84cab688ec22\") " pod="openshift-marketplace/redhat-marketplace-xv8r7"
Nov 21 17:52:05 crc kubenswrapper[4967]: I1121 17:52:05.638684 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xv8r7"
Nov 21 17:52:06 crc kubenswrapper[4967]: I1121 17:52:06.616155 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xv8r7"]
Nov 21 17:52:06 crc kubenswrapper[4967]: I1121 17:52:06.716422 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xv8r7" event={"ID":"81d2adbd-2cb1-4cea-bf8a-84cab688ec22","Type":"ContainerStarted","Data":"638020be6425a657c90febabd87c21a5a40a83b63e00d0a843c0529b96250174"}
Nov 21 17:52:07 crc kubenswrapper[4967]: I1121 17:52:07.757921 4967 generic.go:334] "Generic (PLEG): container finished" podID="81d2adbd-2cb1-4cea-bf8a-84cab688ec22" containerID="eb7c15565b113e8cd359a8a6d2a8894ad4fac1870d943c1eaa7c03a9f5a87fed" exitCode=0
Nov 21 17:52:07 crc kubenswrapper[4967]: I1121 17:52:07.758300 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xv8r7" event={"ID":"81d2adbd-2cb1-4cea-bf8a-84cab688ec22","Type":"ContainerDied","Data":"eb7c15565b113e8cd359a8a6d2a8894ad4fac1870d943c1eaa7c03a9f5a87fed"}
Nov 21 17:52:08 crc kubenswrapper[4967]: I1121 17:52:08.772771 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xv8r7" event={"ID":"81d2adbd-2cb1-4cea-bf8a-84cab688ec22","Type":"ContainerStarted","Data":"76c673d4366a3d4fa5576105757455383ef7049e8ee1f0fae87883a1bc39e3db"}
Nov 21 17:52:09 crc kubenswrapper[4967]: E1121 17:52:09.563069 4967 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod81d2adbd_2cb1_4cea_bf8a_84cab688ec22.slice/crio-76c673d4366a3d4fa5576105757455383ef7049e8ee1f0fae87883a1bc39e3db.scope\": RecentStats: unable to find data in memory cache]"
Nov 21 17:52:09 crc kubenswrapper[4967]: I1121 17:52:09.792205 4967 generic.go:334] "Generic (PLEG): container finished" podID="81d2adbd-2cb1-4cea-bf8a-84cab688ec22" containerID="76c673d4366a3d4fa5576105757455383ef7049e8ee1f0fae87883a1bc39e3db" exitCode=0
Nov 21 17:52:09 crc kubenswrapper[4967]: I1121 17:52:09.792285 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xv8r7" event={"ID":"81d2adbd-2cb1-4cea-bf8a-84cab688ec22","Type":"ContainerDied","Data":"76c673d4366a3d4fa5576105757455383ef7049e8ee1f0fae87883a1bc39e3db"}
Nov 21 17:52:10 crc kubenswrapper[4967]: I1121 17:52:10.810877 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xv8r7" event={"ID":"81d2adbd-2cb1-4cea-bf8a-84cab688ec22","Type":"ContainerStarted","Data":"38a0873b3a375a3bb13a7be7d751b4e15266d15a6d6db98088408963a9b1838f"}
Nov 21 17:52:10 crc kubenswrapper[4967]: I1121 17:52:10.839696 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xv8r7" podStartSLOduration=3.29634515 podStartE2EDuration="5.839673186s" podCreationTimestamp="2025-11-21 17:52:05 +0000 UTC" firstStartedPulling="2025-11-21 17:52:07.763425689 +0000 UTC m=+8216.021946697" lastFinishedPulling="2025-11-21 17:52:10.306753725 +0000 UTC m=+8218.565274733" observedRunningTime="2025-11-21 17:52:10.830175975 +0000 UTC m=+8219.088696983" watchObservedRunningTime="2025-11-21 17:52:10.839673186 +0000 UTC m=+8219.098194194"
Nov 21 17:52:15 crc kubenswrapper[4967]: I1121 17:52:15.639192 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xv8r7"
Nov 21 17:52:15 crc kubenswrapper[4967]: I1121 17:52:15.639693 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xv8r7"
Nov 21 17:52:15 crc kubenswrapper[4967]: I1121 17:52:15.754562 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xv8r7"
Nov 21 17:52:15 crc kubenswrapper[4967]: I1121 17:52:15.987760 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xv8r7"
Nov 21 17:52:16 crc kubenswrapper[4967]: I1121 17:52:16.070252 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xv8r7"]
Nov 21 17:52:17 crc kubenswrapper[4967]: I1121 17:52:17.940510 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xv8r7" podUID="81d2adbd-2cb1-4cea-bf8a-84cab688ec22" containerName="registry-server" containerID="cri-o://38a0873b3a375a3bb13a7be7d751b4e15266d15a6d6db98088408963a9b1838f" gracePeriod=2
Nov 21 17:52:18 crc kubenswrapper[4967]: I1121 17:52:18.588045 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xv8r7"
Nov 21 17:52:18 crc kubenswrapper[4967]: I1121 17:52:18.683291 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81d2adbd-2cb1-4cea-bf8a-84cab688ec22-utilities\") pod \"81d2adbd-2cb1-4cea-bf8a-84cab688ec22\" (UID: \"81d2adbd-2cb1-4cea-bf8a-84cab688ec22\") "
Nov 21 17:52:18 crc kubenswrapper[4967]: I1121 17:52:18.683496 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81d2adbd-2cb1-4cea-bf8a-84cab688ec22-catalog-content\") pod \"81d2adbd-2cb1-4cea-bf8a-84cab688ec22\" (UID: \"81d2adbd-2cb1-4cea-bf8a-84cab688ec22\") "
Nov 21 17:52:18 crc kubenswrapper[4967]: I1121 17:52:18.683717 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tcxkw\" (UniqueName: \"kubernetes.io/projected/81d2adbd-2cb1-4cea-bf8a-84cab688ec22-kube-api-access-tcxkw\") pod \"81d2adbd-2cb1-4cea-bf8a-84cab688ec22\" (UID: \"81d2adbd-2cb1-4cea-bf8a-84cab688ec22\") "
Nov 21 17:52:18 crc kubenswrapper[4967]: I1121 17:52:18.684618 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81d2adbd-2cb1-4cea-bf8a-84cab688ec22-utilities" (OuterVolumeSpecName: "utilities") pod "81d2adbd-2cb1-4cea-bf8a-84cab688ec22" (UID: "81d2adbd-2cb1-4cea-bf8a-84cab688ec22"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 17:52:18 crc kubenswrapper[4967]: I1121 17:52:18.684865 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/81d2adbd-2cb1-4cea-bf8a-84cab688ec22-utilities\") on node \"crc\" DevicePath \"\""
Nov 21 17:52:18 crc kubenswrapper[4967]: I1121 17:52:18.698628 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81d2adbd-2cb1-4cea-bf8a-84cab688ec22-kube-api-access-tcxkw" (OuterVolumeSpecName: "kube-api-access-tcxkw") pod "81d2adbd-2cb1-4cea-bf8a-84cab688ec22" (UID: "81d2adbd-2cb1-4cea-bf8a-84cab688ec22"). InnerVolumeSpecName "kube-api-access-tcxkw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 17:52:18 crc kubenswrapper[4967]: I1121 17:52:18.722284 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81d2adbd-2cb1-4cea-bf8a-84cab688ec22-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "81d2adbd-2cb1-4cea-bf8a-84cab688ec22" (UID: "81d2adbd-2cb1-4cea-bf8a-84cab688ec22"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 21 17:52:18 crc kubenswrapper[4967]: I1121 17:52:18.787464 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/81d2adbd-2cb1-4cea-bf8a-84cab688ec22-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 21 17:52:18 crc kubenswrapper[4967]: I1121 17:52:18.787507 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tcxkw\" (UniqueName: \"kubernetes.io/projected/81d2adbd-2cb1-4cea-bf8a-84cab688ec22-kube-api-access-tcxkw\") on node \"crc\" DevicePath \"\""
Nov 21 17:52:18 crc kubenswrapper[4967]: I1121 17:52:18.957237 4967 generic.go:334] "Generic (PLEG): container finished" podID="81d2adbd-2cb1-4cea-bf8a-84cab688ec22" containerID="38a0873b3a375a3bb13a7be7d751b4e15266d15a6d6db98088408963a9b1838f" exitCode=0
Nov 21 17:52:18 crc kubenswrapper[4967]: I1121 17:52:18.957292 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xv8r7" event={"ID":"81d2adbd-2cb1-4cea-bf8a-84cab688ec22","Type":"ContainerDied","Data":"38a0873b3a375a3bb13a7be7d751b4e15266d15a6d6db98088408963a9b1838f"}
Nov 21 17:52:18 crc kubenswrapper[4967]: I1121 17:52:18.957354 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xv8r7" event={"ID":"81d2adbd-2cb1-4cea-bf8a-84cab688ec22","Type":"ContainerDied","Data":"638020be6425a657c90febabd87c21a5a40a83b63e00d0a843c0529b96250174"}
Nov 21 17:52:18 crc kubenswrapper[4967]: I1121 17:52:18.957377 4967 scope.go:117] "RemoveContainer" containerID="38a0873b3a375a3bb13a7be7d751b4e15266d15a6d6db98088408963a9b1838f"
Nov 21 17:52:18 crc kubenswrapper[4967]: I1121 17:52:18.957295 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xv8r7"
Nov 21 17:52:18 crc kubenswrapper[4967]: I1121 17:52:18.996924 4967 scope.go:117] "RemoveContainer" containerID="76c673d4366a3d4fa5576105757455383ef7049e8ee1f0fae87883a1bc39e3db"
Nov 21 17:52:19 crc kubenswrapper[4967]: I1121 17:52:19.008782 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xv8r7"]
Nov 21 17:52:19 crc kubenswrapper[4967]: I1121 17:52:19.015635 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xv8r7"]
Nov 21 17:52:19 crc kubenswrapper[4967]: I1121 17:52:19.081412 4967 scope.go:117] "RemoveContainer" containerID="eb7c15565b113e8cd359a8a6d2a8894ad4fac1870d943c1eaa7c03a9f5a87fed"
Nov 21 17:52:19 crc kubenswrapper[4967]: I1121 17:52:19.106836 4967 scope.go:117] "RemoveContainer" containerID="38a0873b3a375a3bb13a7be7d751b4e15266d15a6d6db98088408963a9b1838f"
Nov 21 17:52:19 crc kubenswrapper[4967]: E1121 17:52:19.113894 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38a0873b3a375a3bb13a7be7d751b4e15266d15a6d6db98088408963a9b1838f\": container with ID starting with 38a0873b3a375a3bb13a7be7d751b4e15266d15a6d6db98088408963a9b1838f not found: ID does not exist" containerID="38a0873b3a375a3bb13a7be7d751b4e15266d15a6d6db98088408963a9b1838f"
Nov 21 17:52:19 crc kubenswrapper[4967]: I1121 17:52:19.113939 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38a0873b3a375a3bb13a7be7d751b4e15266d15a6d6db98088408963a9b1838f"} err="failed to get container status \"38a0873b3a375a3bb13a7be7d751b4e15266d15a6d6db98088408963a9b1838f\": rpc error: code = NotFound desc = could not find container \"38a0873b3a375a3bb13a7be7d751b4e15266d15a6d6db98088408963a9b1838f\": container with ID starting with 38a0873b3a375a3bb13a7be7d751b4e15266d15a6d6db98088408963a9b1838f not found: ID does not exist"
Nov 21 17:52:19 crc kubenswrapper[4967]: I1121 17:52:19.113972 4967 scope.go:117] "RemoveContainer" containerID="76c673d4366a3d4fa5576105757455383ef7049e8ee1f0fae87883a1bc39e3db"
Nov 21 17:52:19 crc kubenswrapper[4967]: E1121 17:52:19.115850 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76c673d4366a3d4fa5576105757455383ef7049e8ee1f0fae87883a1bc39e3db\": container with ID starting with 76c673d4366a3d4fa5576105757455383ef7049e8ee1f0fae87883a1bc39e3db not found: ID does not exist" containerID="76c673d4366a3d4fa5576105757455383ef7049e8ee1f0fae87883a1bc39e3db"
Nov 21 17:52:19 crc kubenswrapper[4967]: I1121 17:52:19.115883 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76c673d4366a3d4fa5576105757455383ef7049e8ee1f0fae87883a1bc39e3db"} err="failed to get container status \"76c673d4366a3d4fa5576105757455383ef7049e8ee1f0fae87883a1bc39e3db\": rpc error: code = NotFound desc = could not find container \"76c673d4366a3d4fa5576105757455383ef7049e8ee1f0fae87883a1bc39e3db\": container with ID starting with 76c673d4366a3d4fa5576105757455383ef7049e8ee1f0fae87883a1bc39e3db not found: ID does not exist"
Nov 21 17:52:19 crc kubenswrapper[4967]: I1121 17:52:19.115902 4967 scope.go:117] "RemoveContainer" containerID="eb7c15565b113e8cd359a8a6d2a8894ad4fac1870d943c1eaa7c03a9f5a87fed"
Nov 21 17:52:19 crc kubenswrapper[4967]: E1121 17:52:19.116620 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb7c15565b113e8cd359a8a6d2a8894ad4fac1870d943c1eaa7c03a9f5a87fed\": container with ID starting with eb7c15565b113e8cd359a8a6d2a8894ad4fac1870d943c1eaa7c03a9f5a87fed not found: ID does not exist" containerID="eb7c15565b113e8cd359a8a6d2a8894ad4fac1870d943c1eaa7c03a9f5a87fed"
Nov 21 17:52:19 crc kubenswrapper[4967]: I1121 17:52:19.116647 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb7c15565b113e8cd359a8a6d2a8894ad4fac1870d943c1eaa7c03a9f5a87fed"} err="failed to get container status \"eb7c15565b113e8cd359a8a6d2a8894ad4fac1870d943c1eaa7c03a9f5a87fed\": rpc error: code = NotFound desc = could not find container \"eb7c15565b113e8cd359a8a6d2a8894ad4fac1870d943c1eaa7c03a9f5a87fed\": container with ID starting with eb7c15565b113e8cd359a8a6d2a8894ad4fac1870d943c1eaa7c03a9f5a87fed not found: ID does not exist"
Nov 21 17:52:20 crc kubenswrapper[4967]: I1121 17:52:20.555956 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81d2adbd-2cb1-4cea-bf8a-84cab688ec22" path="/var/lib/kubelet/pods/81d2adbd-2cb1-4cea-bf8a-84cab688ec22/volumes"
Nov 21 17:52:32 crc kubenswrapper[4967]: I1121 17:52:32.153541 4967 generic.go:334] "Generic (PLEG): container finished" podID="066395b5-1820-4f1c-a7ad-6956302d10b1" containerID="8e4273ce9f73ff5b303c4a026031340afe26b1db282415664ba9d1f91d6d194c" exitCode=0
Nov 21 17:52:32 crc kubenswrapper[4967]: I1121 17:52:32.153657 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-97nf5/crc-debug-vkr7h" event={"ID":"066395b5-1820-4f1c-a7ad-6956302d10b1","Type":"ContainerDied","Data":"8e4273ce9f73ff5b303c4a026031340afe26b1db282415664ba9d1f91d6d194c"}
Nov 21 17:52:33 crc kubenswrapper[4967]: I1121 17:52:33.306378 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-97nf5/crc-debug-vkr7h"
Nov 21 17:52:33 crc kubenswrapper[4967]: I1121 17:52:33.354015 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-97nf5/crc-debug-vkr7h"]
Nov 21 17:52:33 crc kubenswrapper[4967]: I1121 17:52:33.368122 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-97nf5/crc-debug-vkr7h"]
Nov 21 17:52:33 crc kubenswrapper[4967]: I1121 17:52:33.425888 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/066395b5-1820-4f1c-a7ad-6956302d10b1-host\") pod \"066395b5-1820-4f1c-a7ad-6956302d10b1\" (UID: \"066395b5-1820-4f1c-a7ad-6956302d10b1\") "
Nov 21 17:52:33 crc kubenswrapper[4967]: I1121 17:52:33.426019 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/066395b5-1820-4f1c-a7ad-6956302d10b1-host" (OuterVolumeSpecName: "host") pod "066395b5-1820-4f1c-a7ad-6956302d10b1" (UID: "066395b5-1820-4f1c-a7ad-6956302d10b1"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 21 17:52:33 crc kubenswrapper[4967]: I1121 17:52:33.426216 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztsdz\" (UniqueName: \"kubernetes.io/projected/066395b5-1820-4f1c-a7ad-6956302d10b1-kube-api-access-ztsdz\") pod \"066395b5-1820-4f1c-a7ad-6956302d10b1\" (UID: \"066395b5-1820-4f1c-a7ad-6956302d10b1\") "
Nov 21 17:52:33 crc kubenswrapper[4967]: I1121 17:52:33.427223 4967 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/066395b5-1820-4f1c-a7ad-6956302d10b1-host\") on node \"crc\" DevicePath \"\""
Nov 21 17:52:33 crc kubenswrapper[4967]: I1121 17:52:33.442513 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/066395b5-1820-4f1c-a7ad-6956302d10b1-kube-api-access-ztsdz" (OuterVolumeSpecName: "kube-api-access-ztsdz") pod "066395b5-1820-4f1c-a7ad-6956302d10b1" (UID: "066395b5-1820-4f1c-a7ad-6956302d10b1"). InnerVolumeSpecName "kube-api-access-ztsdz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 17:52:33 crc kubenswrapper[4967]: I1121 17:52:33.530490 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztsdz\" (UniqueName: \"kubernetes.io/projected/066395b5-1820-4f1c-a7ad-6956302d10b1-kube-api-access-ztsdz\") on node \"crc\" DevicePath \"\""
Nov 21 17:52:34 crc kubenswrapper[4967]: I1121 17:52:34.191703 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9ce9e74942290bd40d9a2b495c718f6a90d89c082de4a06ca4fe684247e8d358"
Nov 21 17:52:34 crc kubenswrapper[4967]: I1121 17:52:34.191835 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-97nf5/crc-debug-vkr7h"
Nov 21 17:52:34 crc kubenswrapper[4967]: I1121 17:52:34.567241 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="066395b5-1820-4f1c-a7ad-6956302d10b1" path="/var/lib/kubelet/pods/066395b5-1820-4f1c-a7ad-6956302d10b1/volumes"
Nov 21 17:52:34 crc kubenswrapper[4967]: I1121 17:52:34.662008 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-97nf5/crc-debug-sbr69"]
Nov 21 17:52:34 crc kubenswrapper[4967]: E1121 17:52:34.662943 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81d2adbd-2cb1-4cea-bf8a-84cab688ec22" containerName="registry-server"
Nov 21 17:52:34 crc kubenswrapper[4967]: I1121 17:52:34.662966 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="81d2adbd-2cb1-4cea-bf8a-84cab688ec22" containerName="registry-server"
Nov 21 17:52:34 crc kubenswrapper[4967]: E1121 17:52:34.662996 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="066395b5-1820-4f1c-a7ad-6956302d10b1" containerName="container-00"
Nov 21 17:52:34 crc kubenswrapper[4967]: I1121 17:52:34.663006 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="066395b5-1820-4f1c-a7ad-6956302d10b1" containerName="container-00"
Nov 21 17:52:34 crc kubenswrapper[4967]: E1121 17:52:34.663047 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81d2adbd-2cb1-4cea-bf8a-84cab688ec22" containerName="extract-utilities"
Nov 21 17:52:34 crc kubenswrapper[4967]: I1121 17:52:34.663056 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="81d2adbd-2cb1-4cea-bf8a-84cab688ec22" containerName="extract-utilities"
Nov 21 17:52:34 crc kubenswrapper[4967]: E1121 17:52:34.663075 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81d2adbd-2cb1-4cea-bf8a-84cab688ec22" containerName="extract-content"
Nov 21 17:52:34 crc kubenswrapper[4967]: I1121 17:52:34.663083 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="81d2adbd-2cb1-4cea-bf8a-84cab688ec22" containerName="extract-content"
Nov 21 17:52:34 crc kubenswrapper[4967]: I1121 17:52:34.663391 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="066395b5-1820-4f1c-a7ad-6956302d10b1" containerName="container-00"
Nov 21 17:52:34 crc kubenswrapper[4967]: I1121 17:52:34.663413 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="81d2adbd-2cb1-4cea-bf8a-84cab688ec22" containerName="registry-server"
Nov 21 17:52:34 crc kubenswrapper[4967]: I1121 17:52:34.664615 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-97nf5/crc-debug-sbr69"
Nov 21 17:52:34 crc kubenswrapper[4967]: I1121 17:52:34.778842 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/397bd574-6d9b-4868-aae3-30374446614f-host\") pod \"crc-debug-sbr69\" (UID: \"397bd574-6d9b-4868-aae3-30374446614f\") " pod="openshift-must-gather-97nf5/crc-debug-sbr69"
Nov 21 17:52:34 crc kubenswrapper[4967]: I1121 17:52:34.778999 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tb5k\" (UniqueName: \"kubernetes.io/projected/397bd574-6d9b-4868-aae3-30374446614f-kube-api-access-4tb5k\") pod \"crc-debug-sbr69\" (UID: \"397bd574-6d9b-4868-aae3-30374446614f\") " pod="openshift-must-gather-97nf5/crc-debug-sbr69"
Nov 21 17:52:34 crc kubenswrapper[4967]: I1121 17:52:34.882607 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/397bd574-6d9b-4868-aae3-30374446614f-host\") pod \"crc-debug-sbr69\" (UID: \"397bd574-6d9b-4868-aae3-30374446614f\") " pod="openshift-must-gather-97nf5/crc-debug-sbr69"
Nov 21 17:52:34 crc kubenswrapper[4967]: I1121 17:52:34.882866 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/397bd574-6d9b-4868-aae3-30374446614f-host\") pod \"crc-debug-sbr69\" (UID: \"397bd574-6d9b-4868-aae3-30374446614f\") " pod="openshift-must-gather-97nf5/crc-debug-sbr69"
Nov 21 17:52:34 crc kubenswrapper[4967]: I1121 17:52:34.882901 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tb5k\" (UniqueName: \"kubernetes.io/projected/397bd574-6d9b-4868-aae3-30374446614f-kube-api-access-4tb5k\") pod \"crc-debug-sbr69\" (UID: \"397bd574-6d9b-4868-aae3-30374446614f\") " pod="openshift-must-gather-97nf5/crc-debug-sbr69"
Nov 21 17:52:34 crc kubenswrapper[4967]: I1121 17:52:34.918967 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tb5k\" (UniqueName: \"kubernetes.io/projected/397bd574-6d9b-4868-aae3-30374446614f-kube-api-access-4tb5k\") pod \"crc-debug-sbr69\" (UID: \"397bd574-6d9b-4868-aae3-30374446614f\") " pod="openshift-must-gather-97nf5/crc-debug-sbr69"
Nov 21 17:52:34 crc kubenswrapper[4967]: I1121 17:52:34.998154 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-97nf5/crc-debug-sbr69"
Nov 21 17:52:35 crc kubenswrapper[4967]: I1121 17:52:35.211432 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-97nf5/crc-debug-sbr69" event={"ID":"397bd574-6d9b-4868-aae3-30374446614f","Type":"ContainerStarted","Data":"6295a9f754e5bb5f093f39ba4e2e42b5c59405f70db85d1f6a310570a2c9f9b2"}
Nov 21 17:52:36 crc kubenswrapper[4967]: I1121 17:52:36.226390 4967 generic.go:334] "Generic (PLEG): container finished" podID="397bd574-6d9b-4868-aae3-30374446614f" containerID="99da2aecc36ddce379682f5890abf61131c8fc0b45c634e936c7986219d7c1cc" exitCode=0
Nov 21 17:52:36 crc kubenswrapper[4967]: I1121 17:52:36.226489 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-97nf5/crc-debug-sbr69" event={"ID":"397bd574-6d9b-4868-aae3-30374446614f","Type":"ContainerDied","Data":"99da2aecc36ddce379682f5890abf61131c8fc0b45c634e936c7986219d7c1cc"}
Nov 21 17:52:37 crc kubenswrapper[4967]: I1121 17:52:37.371457 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-97nf5/crc-debug-sbr69"
Nov 21 17:52:37 crc kubenswrapper[4967]: I1121 17:52:37.461693 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4tb5k\" (UniqueName: \"kubernetes.io/projected/397bd574-6d9b-4868-aae3-30374446614f-kube-api-access-4tb5k\") pod \"397bd574-6d9b-4868-aae3-30374446614f\" (UID: \"397bd574-6d9b-4868-aae3-30374446614f\") "
Nov 21 17:52:37 crc kubenswrapper[4967]: I1121 17:52:37.461903 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/397bd574-6d9b-4868-aae3-30374446614f-host\") pod \"397bd574-6d9b-4868-aae3-30374446614f\" (UID: \"397bd574-6d9b-4868-aae3-30374446614f\") "
Nov 21 17:52:37 crc kubenswrapper[4967]: I1121 17:52:37.462070 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/397bd574-6d9b-4868-aae3-30374446614f-host" (OuterVolumeSpecName: "host") pod "397bd574-6d9b-4868-aae3-30374446614f" (UID: "397bd574-6d9b-4868-aae3-30374446614f"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 21 17:52:37 crc kubenswrapper[4967]: I1121 17:52:37.463196 4967 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/397bd574-6d9b-4868-aae3-30374446614f-host\") on node \"crc\" DevicePath \"\""
Nov 21 17:52:37 crc kubenswrapper[4967]: I1121 17:52:37.471733 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/397bd574-6d9b-4868-aae3-30374446614f-kube-api-access-4tb5k" (OuterVolumeSpecName: "kube-api-access-4tb5k") pod "397bd574-6d9b-4868-aae3-30374446614f" (UID: "397bd574-6d9b-4868-aae3-30374446614f"). InnerVolumeSpecName "kube-api-access-4tb5k".
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:52:37 crc kubenswrapper[4967]: I1121 17:52:37.569021 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4tb5k\" (UniqueName: \"kubernetes.io/projected/397bd574-6d9b-4868-aae3-30374446614f-kube-api-access-4tb5k\") on node \"crc\" DevicePath \"\"" Nov 21 17:52:38 crc kubenswrapper[4967]: I1121 17:52:38.269895 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-97nf5/crc-debug-sbr69" event={"ID":"397bd574-6d9b-4868-aae3-30374446614f","Type":"ContainerDied","Data":"6295a9f754e5bb5f093f39ba4e2e42b5c59405f70db85d1f6a310570a2c9f9b2"} Nov 21 17:52:38 crc kubenswrapper[4967]: I1121 17:52:38.269952 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-97nf5/crc-debug-sbr69" Nov 21 17:52:38 crc kubenswrapper[4967]: I1121 17:52:38.269960 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6295a9f754e5bb5f093f39ba4e2e42b5c59405f70db85d1f6a310570a2c9f9b2" Nov 21 17:52:39 crc kubenswrapper[4967]: I1121 17:52:39.523916 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-97nf5/crc-debug-sbr69"] Nov 21 17:52:39 crc kubenswrapper[4967]: I1121 17:52:39.538909 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-97nf5/crc-debug-sbr69"] Nov 21 17:52:40 crc kubenswrapper[4967]: I1121 17:52:40.556497 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="397bd574-6d9b-4868-aae3-30374446614f" path="/var/lib/kubelet/pods/397bd574-6d9b-4868-aae3-30374446614f/volumes" Nov 21 17:52:40 crc kubenswrapper[4967]: I1121 17:52:40.753696 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-97nf5/crc-debug-jm4pc"] Nov 21 17:52:40 crc kubenswrapper[4967]: E1121 17:52:40.754476 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="397bd574-6d9b-4868-aae3-30374446614f" containerName="container-00" Nov 21 17:52:40 crc kubenswrapper[4967]: I1121 17:52:40.754498 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="397bd574-6d9b-4868-aae3-30374446614f" containerName="container-00" Nov 21 17:52:40 crc kubenswrapper[4967]: I1121 17:52:40.754747 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="397bd574-6d9b-4868-aae3-30374446614f" containerName="container-00" Nov 21 17:52:40 crc kubenswrapper[4967]: I1121 17:52:40.755640 4967 util.go:30] "No sandbox for pod can be found. 
Nov 21 17:52:40 crc kubenswrapper[4967]: I1121 17:52:40.863986 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6xdf\" (UniqueName: \"kubernetes.io/projected/b5d2e94a-50f7-4955-a151-f2a3384bd36b-kube-api-access-z6xdf\") pod \"crc-debug-jm4pc\" (UID: \"b5d2e94a-50f7-4955-a151-f2a3384bd36b\") " pod="openshift-must-gather-97nf5/crc-debug-jm4pc"
Nov 21 17:52:40 crc kubenswrapper[4967]: I1121 17:52:40.864444 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b5d2e94a-50f7-4955-a151-f2a3384bd36b-host\") pod \"crc-debug-jm4pc\" (UID: \"b5d2e94a-50f7-4955-a151-f2a3384bd36b\") " pod="openshift-must-gather-97nf5/crc-debug-jm4pc"
Nov 21 17:52:40 crc kubenswrapper[4967]: I1121 17:52:40.966678 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6xdf\" (UniqueName: \"kubernetes.io/projected/b5d2e94a-50f7-4955-a151-f2a3384bd36b-kube-api-access-z6xdf\") pod \"crc-debug-jm4pc\" (UID: \"b5d2e94a-50f7-4955-a151-f2a3384bd36b\") " pod="openshift-must-gather-97nf5/crc-debug-jm4pc"
Nov 21 17:52:40 crc kubenswrapper[4967]: I1121 17:52:40.966772 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b5d2e94a-50f7-4955-a151-f2a3384bd36b-host\") pod \"crc-debug-jm4pc\" (UID: \"b5d2e94a-50f7-4955-a151-f2a3384bd36b\") " pod="openshift-must-gather-97nf5/crc-debug-jm4pc"
Nov 21 17:52:40 crc kubenswrapper[4967]: I1121 17:52:40.966999 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b5d2e94a-50f7-4955-a151-f2a3384bd36b-host\") pod \"crc-debug-jm4pc\" (UID: \"b5d2e94a-50f7-4955-a151-f2a3384bd36b\") " pod="openshift-must-gather-97nf5/crc-debug-jm4pc"
Nov 21 17:52:40 crc kubenswrapper[4967]: I1121 17:52:40.992282 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6xdf\" (UniqueName: \"kubernetes.io/projected/b5d2e94a-50f7-4955-a151-f2a3384bd36b-kube-api-access-z6xdf\") pod \"crc-debug-jm4pc\" (UID: \"b5d2e94a-50f7-4955-a151-f2a3384bd36b\") " pod="openshift-must-gather-97nf5/crc-debug-jm4pc"
Nov 21 17:52:41 crc kubenswrapper[4967]: I1121 17:52:41.092921 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-97nf5/crc-debug-jm4pc"
Nov 21 17:52:41 crc kubenswrapper[4967]: I1121 17:52:41.314983 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-97nf5/crc-debug-jm4pc" event={"ID":"b5d2e94a-50f7-4955-a151-f2a3384bd36b","Type":"ContainerStarted","Data":"954d1a90f932073cb297408acc60f790ba95e9d1be2a4788322fa80458ea9811"}
Nov 21 17:52:41 crc kubenswrapper[4967]: I1121 17:52:41.897918 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-v9694"]
Nov 21 17:52:41 crc kubenswrapper[4967]: I1121 17:52:41.904127 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-v9694"
Nov 21 17:52:41 crc kubenswrapper[4967]: I1121 17:52:41.915898 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-v9694"]
Nov 21 17:52:41 crc kubenswrapper[4967]: I1121 17:52:41.997170 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a963506-b742-42a8-89f6-75ddfc3afc01-utilities\") pod \"certified-operators-v9694\" (UID: \"0a963506-b742-42a8-89f6-75ddfc3afc01\") " pod="openshift-marketplace/certified-operators-v9694"
Nov 21 17:52:41 crc kubenswrapper[4967]: I1121 17:52:41.997274 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhr7t\" (UniqueName: \"kubernetes.io/projected/0a963506-b742-42a8-89f6-75ddfc3afc01-kube-api-access-vhr7t\") pod \"certified-operators-v9694\" (UID: \"0a963506-b742-42a8-89f6-75ddfc3afc01\") " pod="openshift-marketplace/certified-operators-v9694"
Nov 21 17:52:41 crc kubenswrapper[4967]: I1121 17:52:41.997830 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a963506-b742-42a8-89f6-75ddfc3afc01-catalog-content\") pod \"certified-operators-v9694\" (UID: \"0a963506-b742-42a8-89f6-75ddfc3afc01\") " pod="openshift-marketplace/certified-operators-v9694"
Nov 21 17:52:42 crc kubenswrapper[4967]: I1121 17:52:42.100589 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a963506-b742-42a8-89f6-75ddfc3afc01-catalog-content\") pod \"certified-operators-v9694\" (UID: \"0a963506-b742-42a8-89f6-75ddfc3afc01\") " pod="openshift-marketplace/certified-operators-v9694"
Nov 21 17:52:42 crc kubenswrapper[4967]: I1121 17:52:42.100754 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a963506-b742-42a8-89f6-75ddfc3afc01-utilities\") pod \"certified-operators-v9694\" (UID: \"0a963506-b742-42a8-89f6-75ddfc3afc01\") " pod="openshift-marketplace/certified-operators-v9694"
Nov 21 17:52:42 crc kubenswrapper[4967]: I1121 17:52:42.100817 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhr7t\" (UniqueName: \"kubernetes.io/projected/0a963506-b742-42a8-89f6-75ddfc3afc01-kube-api-access-vhr7t\") pod \"certified-operators-v9694\" (UID: \"0a963506-b742-42a8-89f6-75ddfc3afc01\") " pod="openshift-marketplace/certified-operators-v9694"
Nov 21 17:52:42 crc kubenswrapper[4967]: I1121 17:52:42.101438 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a963506-b742-42a8-89f6-75ddfc3afc01-utilities\") pod \"certified-operators-v9694\" (UID: \"0a963506-b742-42a8-89f6-75ddfc3afc01\") " pod="openshift-marketplace/certified-operators-v9694"
Nov 21 17:52:42 crc kubenswrapper[4967]: I1121 17:52:42.101532 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a963506-b742-42a8-89f6-75ddfc3afc01-catalog-content\") pod \"certified-operators-v9694\" (UID: \"0a963506-b742-42a8-89f6-75ddfc3afc01\") " pod="openshift-marketplace/certified-operators-v9694"
Nov 21 17:52:42 crc kubenswrapper[4967]: I1121 17:52:42.125010 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhr7t\" (UniqueName: \"kubernetes.io/projected/0a963506-b742-42a8-89f6-75ddfc3afc01-kube-api-access-vhr7t\") pod \"certified-operators-v9694\" (UID: \"0a963506-b742-42a8-89f6-75ddfc3afc01\") " pod="openshift-marketplace/certified-operators-v9694"
Nov 21 17:52:42 crc kubenswrapper[4967]: I1121 17:52:42.228266 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-v9694"
Nov 21 17:52:42 crc kubenswrapper[4967]: I1121 17:52:42.350127 4967 generic.go:334] "Generic (PLEG): container finished" podID="b5d2e94a-50f7-4955-a151-f2a3384bd36b" containerID="9485854fd0a2d428590b7a0f2569f65d5262cee5a34f800c8897d681152940ab" exitCode=0
Nov 21 17:52:42 crc kubenswrapper[4967]: I1121 17:52:42.350193 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-97nf5/crc-debug-jm4pc" event={"ID":"b5d2e94a-50f7-4955-a151-f2a3384bd36b","Type":"ContainerDied","Data":"9485854fd0a2d428590b7a0f2569f65d5262cee5a34f800c8897d681152940ab"}
Nov 21 17:52:42 crc kubenswrapper[4967]: I1121 17:52:42.433189 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-97nf5/crc-debug-jm4pc"]
Nov 21 17:52:42 crc kubenswrapper[4967]: I1121 17:52:42.461250 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-97nf5/crc-debug-jm4pc"]
Nov 21 17:52:42 crc kubenswrapper[4967]: I1121 17:52:42.809294 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-v9694"]
Nov 21 17:52:43 crc kubenswrapper[4967]: I1121 17:52:43.363182 4967 generic.go:334] "Generic (PLEG): container finished" podID="0a963506-b742-42a8-89f6-75ddfc3afc01" containerID="8261f5c156122c365d444db8708e40ff5d2a73881a36d7060167a97728a95321" exitCode=0
Nov 21 17:52:43 crc kubenswrapper[4967]: I1121 17:52:43.363271 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v9694" event={"ID":"0a963506-b742-42a8-89f6-75ddfc3afc01","Type":"ContainerDied","Data":"8261f5c156122c365d444db8708e40ff5d2a73881a36d7060167a97728a95321"}
Nov 21 17:52:43 crc kubenswrapper[4967]: I1121 17:52:43.363624 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v9694" event={"ID":"0a963506-b742-42a8-89f6-75ddfc3afc01","Type":"ContainerStarted","Data":"20b30dae29bc106131ebfff326595f00890626456c0a2ff92023b861f6925847"}
Nov 21 17:52:43 crc kubenswrapper[4967]: I1121 17:52:43.496686 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-97nf5/crc-debug-jm4pc"
Nov 21 17:52:43 crc kubenswrapper[4967]: I1121 17:52:43.642886 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b5d2e94a-50f7-4955-a151-f2a3384bd36b-host\") pod \"b5d2e94a-50f7-4955-a151-f2a3384bd36b\" (UID: \"b5d2e94a-50f7-4955-a151-f2a3384bd36b\") "
Nov 21 17:52:43 crc kubenswrapper[4967]: I1121 17:52:43.642959 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b5d2e94a-50f7-4955-a151-f2a3384bd36b-host" (OuterVolumeSpecName: "host") pod "b5d2e94a-50f7-4955-a151-f2a3384bd36b" (UID: "b5d2e94a-50f7-4955-a151-f2a3384bd36b"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 21 17:52:43 crc kubenswrapper[4967]: I1121 17:52:43.642995 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6xdf\" (UniqueName: \"kubernetes.io/projected/b5d2e94a-50f7-4955-a151-f2a3384bd36b-kube-api-access-z6xdf\") pod \"b5d2e94a-50f7-4955-a151-f2a3384bd36b\" (UID: \"b5d2e94a-50f7-4955-a151-f2a3384bd36b\") "
Nov 21 17:52:43 crc kubenswrapper[4967]: I1121 17:52:43.643548 4967 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b5d2e94a-50f7-4955-a151-f2a3384bd36b-host\") on node \"crc\" DevicePath \"\""
Nov 21 17:52:43 crc kubenswrapper[4967]: I1121 17:52:43.648531 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5d2e94a-50f7-4955-a151-f2a3384bd36b-kube-api-access-z6xdf" (OuterVolumeSpecName: "kube-api-access-z6xdf") pod "b5d2e94a-50f7-4955-a151-f2a3384bd36b" (UID: "b5d2e94a-50f7-4955-a151-f2a3384bd36b"). InnerVolumeSpecName "kube-api-access-z6xdf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 21 17:52:43 crc kubenswrapper[4967]: I1121 17:52:43.746198 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6xdf\" (UniqueName: \"kubernetes.io/projected/b5d2e94a-50f7-4955-a151-f2a3384bd36b-kube-api-access-z6xdf\") on node \"crc\" DevicePath \"\""
Nov 21 17:52:44 crc kubenswrapper[4967]: I1121 17:52:44.387747 4967 scope.go:117] "RemoveContainer" containerID="9485854fd0a2d428590b7a0f2569f65d5262cee5a34f800c8897d681152940ab"
Nov 21 17:52:44 crc kubenswrapper[4967]: I1121 17:52:44.389457 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-97nf5/crc-debug-jm4pc"
Nov 21 17:52:44 crc kubenswrapper[4967]: I1121 17:52:44.398764 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v9694" event={"ID":"0a963506-b742-42a8-89f6-75ddfc3afc01","Type":"ContainerStarted","Data":"3b332c73429987b09eac410c30aa2ec58093f8f7a033949ca36ebeb630891872"}
Nov 21 17:52:44 crc kubenswrapper[4967]: I1121 17:52:44.550137 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5d2e94a-50f7-4955-a151-f2a3384bd36b" path="/var/lib/kubelet/pods/b5d2e94a-50f7-4955-a151-f2a3384bd36b/volumes"
Nov 21 17:52:46 crc kubenswrapper[4967]: I1121 17:52:46.440112 4967 generic.go:334] "Generic (PLEG): container finished" podID="0a963506-b742-42a8-89f6-75ddfc3afc01" containerID="3b332c73429987b09eac410c30aa2ec58093f8f7a033949ca36ebeb630891872" exitCode=0
Nov 21 17:52:46 crc kubenswrapper[4967]: I1121 17:52:46.440419 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v9694" event={"ID":"0a963506-b742-42a8-89f6-75ddfc3afc01","Type":"ContainerDied","Data":"3b332c73429987b09eac410c30aa2ec58093f8f7a033949ca36ebeb630891872"}
Nov 21 17:52:47 crc kubenswrapper[4967]: I1121 17:52:47.457892 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v9694" event={"ID":"0a963506-b742-42a8-89f6-75ddfc3afc01","Type":"ContainerStarted","Data":"0eecc4356a0efcef801eb5205682819106d87722024496feb18dcfe0eb32ec0b"}
Nov 21 17:52:47 crc kubenswrapper[4967]: I1121 17:52:47.482142 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-v9694" podStartSLOduration=2.976417309 podStartE2EDuration="6.482119477s" podCreationTimestamp="2025-11-21 17:52:41 +0000 UTC" firstStartedPulling="2025-11-21 17:52:43.367775182 +0000 UTC m=+8251.626296190" lastFinishedPulling="2025-11-21 17:52:46.87347735 +0000 UTC m=+8255.131998358" observedRunningTime="2025-11-21 17:52:47.478095802 +0000 UTC m=+8255.736616810" watchObservedRunningTime="2025-11-21 17:52:47.482119477 +0000 UTC m=+8255.740640475"
Nov 21 17:52:52 crc kubenswrapper[4967]: I1121 17:52:52.229500 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-v9694"
Nov 21 17:52:52 crc kubenswrapper[4967]: I1121 17:52:52.230157 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-v9694"
Nov 21 17:52:52 crc kubenswrapper[4967]: I1121 17:52:52.288996 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-v9694"
Nov 21 17:52:52 crc kubenswrapper[4967]: I1121 17:52:52.622111 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-v9694"
Nov 21 17:52:52 crc kubenswrapper[4967]: I1121 17:52:52.727841 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-v9694"]
Nov 21 17:52:54 crc kubenswrapper[4967]: I1121 17:52:54.567413 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-v9694" podUID="0a963506-b742-42a8-89f6-75ddfc3afc01" containerName="registry-server" containerID="cri-o://0eecc4356a0efcef801eb5205682819106d87722024496feb18dcfe0eb32ec0b" gracePeriod=2
Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.172918 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-v9694"
Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.304658 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a963506-b742-42a8-89f6-75ddfc3afc01-catalog-content\") pod \"0a963506-b742-42a8-89f6-75ddfc3afc01\" (UID: \"0a963506-b742-42a8-89f6-75ddfc3afc01\") "
Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.304837 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a963506-b742-42a8-89f6-75ddfc3afc01-utilities\") pod \"0a963506-b742-42a8-89f6-75ddfc3afc01\" (UID: \"0a963506-b742-42a8-89f6-75ddfc3afc01\") "
Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.305418 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vhr7t\" (UniqueName: \"kubernetes.io/projected/0a963506-b742-42a8-89f6-75ddfc3afc01-kube-api-access-vhr7t\") pod \"0a963506-b742-42a8-89f6-75ddfc3afc01\" (UID: \"0a963506-b742-42a8-89f6-75ddfc3afc01\") "
Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.305998 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a963506-b742-42a8-89f6-75ddfc3afc01-utilities" (OuterVolumeSpecName: "utilities") pod "0a963506-b742-42a8-89f6-75ddfc3afc01" (UID: "0a963506-b742-42a8-89f6-75ddfc3afc01"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.315533 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a963506-b742-42a8-89f6-75ddfc3afc01-kube-api-access-vhr7t" (OuterVolumeSpecName: "kube-api-access-vhr7t") pod "0a963506-b742-42a8-89f6-75ddfc3afc01" (UID: "0a963506-b742-42a8-89f6-75ddfc3afc01"). InnerVolumeSpecName "kube-api-access-vhr7t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.360034 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a963506-b742-42a8-89f6-75ddfc3afc01-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0a963506-b742-42a8-89f6-75ddfc3afc01" (UID: "0a963506-b742-42a8-89f6-75ddfc3afc01"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.408611 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a963506-b742-42a8-89f6-75ddfc3afc01-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.408654 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vhr7t\" (UniqueName: \"kubernetes.io/projected/0a963506-b742-42a8-89f6-75ddfc3afc01-kube-api-access-vhr7t\") on node \"crc\" DevicePath \"\"" Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.408667 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a963506-b742-42a8-89f6-75ddfc3afc01-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.583041 4967 generic.go:334] "Generic (PLEG): container finished" podID="0a963506-b742-42a8-89f6-75ddfc3afc01" containerID="0eecc4356a0efcef801eb5205682819106d87722024496feb18dcfe0eb32ec0b" exitCode=0 Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.583120 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v9694" event={"ID":"0a963506-b742-42a8-89f6-75ddfc3afc01","Type":"ContainerDied","Data":"0eecc4356a0efcef801eb5205682819106d87722024496feb18dcfe0eb32ec0b"} Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.583173 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v9694" event={"ID":"0a963506-b742-42a8-89f6-75ddfc3afc01","Type":"ContainerDied","Data":"20b30dae29bc106131ebfff326595f00890626456c0a2ff92023b861f6925847"} Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.583197 4967 scope.go:117] "RemoveContainer" containerID="0eecc4356a0efcef801eb5205682819106d87722024496feb18dcfe0eb32ec0b" Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.584906 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-v9694" Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.627592 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-v9694"] Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.633507 4967 scope.go:117] "RemoveContainer" containerID="3b332c73429987b09eac410c30aa2ec58093f8f7a033949ca36ebeb630891872" Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.641152 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-v9694"] Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.669568 4967 scope.go:117] "RemoveContainer" containerID="8261f5c156122c365d444db8708e40ff5d2a73881a36d7060167a97728a95321" Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.736113 4967 scope.go:117] "RemoveContainer" containerID="0eecc4356a0efcef801eb5205682819106d87722024496feb18dcfe0eb32ec0b" Nov 21 17:52:55 crc kubenswrapper[4967]: E1121 17:52:55.736963 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0eecc4356a0efcef801eb5205682819106d87722024496feb18dcfe0eb32ec0b\": container with ID starting with 0eecc4356a0efcef801eb5205682819106d87722024496feb18dcfe0eb32ec0b not found: ID does not exist" containerID="0eecc4356a0efcef801eb5205682819106d87722024496feb18dcfe0eb32ec0b" Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.737034 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0eecc4356a0efcef801eb5205682819106d87722024496feb18dcfe0eb32ec0b"} err="failed to get container status \"0eecc4356a0efcef801eb5205682819106d87722024496feb18dcfe0eb32ec0b\": rpc error: code = NotFound desc = could not find container \"0eecc4356a0efcef801eb5205682819106d87722024496feb18dcfe0eb32ec0b\": container with ID starting with 0eecc4356a0efcef801eb5205682819106d87722024496feb18dcfe0eb32ec0b not found: ID does not exist" Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.737077 4967 scope.go:117] "RemoveContainer" containerID="3b332c73429987b09eac410c30aa2ec58093f8f7a033949ca36ebeb630891872" Nov 21 17:52:55 crc kubenswrapper[4967]: E1121 17:52:55.737719 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b332c73429987b09eac410c30aa2ec58093f8f7a033949ca36ebeb630891872\": container with ID starting with 3b332c73429987b09eac410c30aa2ec58093f8f7a033949ca36ebeb630891872 not found: ID does not exist" containerID="3b332c73429987b09eac410c30aa2ec58093f8f7a033949ca36ebeb630891872" Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.737768 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b332c73429987b09eac410c30aa2ec58093f8f7a033949ca36ebeb630891872"} err="failed to get container status \"3b332c73429987b09eac410c30aa2ec58093f8f7a033949ca36ebeb630891872\": rpc error: code = NotFound desc = could not find container \"3b332c73429987b09eac410c30aa2ec58093f8f7a033949ca36ebeb630891872\": container with ID starting with 3b332c73429987b09eac410c30aa2ec58093f8f7a033949ca36ebeb630891872 not found: ID does not exist" Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.737788 4967 scope.go:117] "RemoveContainer" containerID="8261f5c156122c365d444db8708e40ff5d2a73881a36d7060167a97728a95321" Nov 21 17:52:55 crc kubenswrapper[4967]: E1121 17:52:55.738173 4967 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"8261f5c156122c365d444db8708e40ff5d2a73881a36d7060167a97728a95321\": container with ID starting with 8261f5c156122c365d444db8708e40ff5d2a73881a36d7060167a97728a95321 not found: ID does not exist" containerID="8261f5c156122c365d444db8708e40ff5d2a73881a36d7060167a97728a95321" Nov 21 17:52:55 crc kubenswrapper[4967]: I1121 17:52:55.738203 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8261f5c156122c365d444db8708e40ff5d2a73881a36d7060167a97728a95321"} err="failed to get container status \"8261f5c156122c365d444db8708e40ff5d2a73881a36d7060167a97728a95321\": rpc error: code = NotFound desc = could not find container \"8261f5c156122c365d444db8708e40ff5d2a73881a36d7060167a97728a95321\": container with ID starting with 8261f5c156122c365d444db8708e40ff5d2a73881a36d7060167a97728a95321 not found: ID does not exist" Nov 21 17:52:56 crc kubenswrapper[4967]: I1121 17:52:56.560838 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a963506-b742-42a8-89f6-75ddfc3afc01" path="/var/lib/kubelet/pods/0a963506-b742-42a8-89f6-75ddfc3afc01/volumes" Nov 21 17:53:03 crc kubenswrapper[4967]: I1121 17:53:03.003811 4967 patch_prober.go:28] interesting pod/route-controller-manager-745bcc88bb-ncrhx container/route-controller-manager namespace/openshift-route-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.80:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 21 17:53:03 crc kubenswrapper[4967]: I1121 17:53:03.004695 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" podUID="7ddc1c0c-9146-444c-8597-7be6bb68a530" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.80:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 21 17:53:03 crc kubenswrapper[4967]: I1121 17:53:03.011550 4967 patch_prober.go:28] interesting pod/route-controller-manager-745bcc88bb-ncrhx container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.80:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 21 17:53:03 crc kubenswrapper[4967]: I1121 17:53:03.011814 4967 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-745bcc88bb-ncrhx" podUID="7ddc1c0c-9146-444c-8597-7be6bb68a530" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.80:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 17:53:10 crc kubenswrapper[4967]: I1121 17:53:10.248951 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_067d67d5-6dd0-43b7-ab83-8765ac6f10ac/aodh-api/0.log" Nov 21 17:53:10 crc kubenswrapper[4967]: I1121 17:53:10.425266 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_067d67d5-6dd0-43b7-ab83-8765ac6f10ac/aodh-listener/0.log" Nov 21 17:53:10 crc kubenswrapper[4967]: I1121 17:53:10.455393 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_067d67d5-6dd0-43b7-ab83-8765ac6f10ac/aodh-evaluator/0.log" Nov 21 17:53:10 crc 
kubenswrapper[4967]: I1121 17:53:10.500026 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_067d67d5-6dd0-43b7-ab83-8765ac6f10ac/aodh-notifier/0.log" Nov 21 17:53:10 crc kubenswrapper[4967]: I1121 17:53:10.693411 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5969866c74-lgff2_16a9a790-55c8-4924-ae4d-c788238f8211/barbican-api/0.log" Nov 21 17:53:10 crc kubenswrapper[4967]: I1121 17:53:10.724166 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5969866c74-lgff2_16a9a790-55c8-4924-ae4d-c788238f8211/barbican-api-log/0.log" Nov 21 17:53:10 crc kubenswrapper[4967]: I1121 17:53:10.829030 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-8784d986b-zqb7r_4ffbf986-245c-45b4-b6e1-544c887362be/barbican-keystone-listener/0.log" Nov 21 17:53:11 crc kubenswrapper[4967]: I1121 17:53:11.275120 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-8784d986b-zqb7r_4ffbf986-245c-45b4-b6e1-544c887362be/barbican-keystone-listener-log/0.log" Nov 21 17:53:11 crc kubenswrapper[4967]: I1121 17:53:11.280635 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-bb45fb999-wtdcm_166a1693-08af-47c6-a9b7-283fb1edfc10/barbican-worker/0.log" Nov 21 17:53:11 crc kubenswrapper[4967]: I1121 17:53:11.315439 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-bb45fb999-wtdcm_166a1693-08af-47c6-a9b7-283fb1edfc10/barbican-worker-log/0.log" Nov 21 17:53:11 crc kubenswrapper[4967]: I1121 17:53:11.552240 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-xr8mw_b43aaf4b-3291-4e5a-b01d-ee1365c62ab2/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 17:53:11 crc kubenswrapper[4967]: I1121 17:53:11.612460 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_be34fe3c-c5f0-4eaf-a694-02b5b5bf343b/ceilometer-central-agent/0.log" Nov 21 17:53:11 crc kubenswrapper[4967]: I1121 17:53:11.739621 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_be34fe3c-c5f0-4eaf-a694-02b5b5bf343b/ceilometer-notification-agent/0.log" Nov 21 17:53:11 crc kubenswrapper[4967]: I1121 17:53:11.810222 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_be34fe3c-c5f0-4eaf-a694-02b5b5bf343b/sg-core/0.log" Nov 21 17:53:11 crc kubenswrapper[4967]: I1121 17:53:11.826329 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_be34fe3c-c5f0-4eaf-a694-02b5b5bf343b/proxy-httpd/0.log" Nov 21 17:53:12 crc kubenswrapper[4967]: I1121 17:53:12.057448 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_57e505d7-beb3-43ca-a03d-c5ae00347bc0/cinder-api-log/0.log" Nov 21 17:53:12 crc kubenswrapper[4967]: I1121 17:53:12.092949 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_57e505d7-beb3-43ca-a03d-c5ae00347bc0/cinder-api/0.log" Nov 21 17:53:12 crc kubenswrapper[4967]: I1121 17:53:12.238233 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_2c76f304-d7ff-4488-bf08-228d143dae3d/cinder-scheduler/0.log" Nov 21 17:53:12 crc kubenswrapper[4967]: I1121 17:53:12.358724 4967 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_cinder-scheduler-0_2c76f304-d7ff-4488-bf08-228d143dae3d/probe/0.log" Nov 21 17:53:12 crc kubenswrapper[4967]: I1121 17:53:12.407616 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-f4z7r_ba2532f1-bd7f-4c44-b59e-4cdcd2ac7fe2/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 17:53:12 crc kubenswrapper[4967]: I1121 17:53:12.612589 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-lfsf2_b068be91-0b69-4778-b47a-2ecb6a9c040a/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 17:53:12 crc kubenswrapper[4967]: I1121 17:53:12.695538 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5d75f767dc-9kxh6_c276d90d-6f12-4909-9b88-cb881f3f8b74/init/0.log" Nov 21 17:53:12 crc kubenswrapper[4967]: I1121 17:53:12.890512 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5d75f767dc-9kxh6_c276d90d-6f12-4909-9b88-cb881f3f8b74/init/0.log" Nov 21 17:53:13 crc kubenswrapper[4967]: I1121 17:53:13.011764 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-v6cnw_66c72c31-e791-478b-bbfc-3ba795c580e9/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 17:53:13 crc kubenswrapper[4967]: I1121 17:53:13.015578 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5d75f767dc-9kxh6_c276d90d-6f12-4909-9b88-cb881f3f8b74/dnsmasq-dns/0.log" Nov 21 17:53:13 crc kubenswrapper[4967]: I1121 17:53:13.293075 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_524adcf3-b5fb-468d-8964-f88d58729d57/glance-log/0.log" Nov 21 17:53:13 crc kubenswrapper[4967]: I1121 17:53:13.314995 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_524adcf3-b5fb-468d-8964-f88d58729d57/glance-httpd/0.log" Nov 21 17:53:13 crc kubenswrapper[4967]: I1121 17:53:13.551475 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_2b52e62c-c848-4f1d-8886-1d799e1d41da/glance-httpd/0.log" Nov 21 17:53:13 crc kubenswrapper[4967]: I1121 17:53:13.628216 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_2b52e62c-c848-4f1d-8886-1d799e1d41da/glance-log/0.log" Nov 21 17:53:13 crc kubenswrapper[4967]: I1121 17:53:13.881597 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-6f79c69644-jcsfk_c5ca4dfe-6ce8-4971-b3d4-b7ddbffa9606/heat-api/0.log" Nov 21 17:53:14 crc kubenswrapper[4967]: I1121 17:53:14.151274 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-5f5f4d85f8-g4l64_b748e8f7-dfc1-4fe2-a186-8ac74cc57bc3/heat-cfnapi/0.log" Nov 21 17:53:14 crc kubenswrapper[4967]: I1121 17:53:14.193514 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-79b74c6887-kxssl_ed27e21a-aab4-4d97-a6de-34158f1e1e03/heat-engine/0.log" Nov 21 17:53:14 crc kubenswrapper[4967]: I1121 17:53:14.316571 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-lqz5b_4aaa5027-a173-4854-90e9-69635bd3cd76/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 17:53:14 crc kubenswrapper[4967]: I1121 17:53:14.430102 4967 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-pngrx_ba294b04-629f-4369-be8f-07debefffcb8/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 17:53:14 crc kubenswrapper[4967]: I1121 17:53:14.666008 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29395681-wbns7_38e452a5-595e-4749-9310-48c09e18e32a/keystone-cron/0.log" Nov 21 17:53:14 crc kubenswrapper[4967]: I1121 17:53:14.782338 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29395741-7nk8d_97c2ceb6-473f-408c-8e7b-796f7f655f68/keystone-cron/0.log" Nov 21 17:53:14 crc kubenswrapper[4967]: I1121 17:53:14.987743 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_48d66520-3487-4842-b42f-5db405361e11/kube-state-metrics/0.log" Nov 21 17:53:15 crc kubenswrapper[4967]: I1121 17:53:15.127001 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-7fkwq_1f68112d-a2c4-44ac-92bb-c24db6e767c0/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 17:53:15 crc kubenswrapper[4967]: I1121 17:53:15.192877 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-7c9cd95b4b-sqmvx_cfda2b1a-4625-4150-9b58-c958f677ceb6/keystone-api/0.log" Nov 21 17:53:15 crc kubenswrapper[4967]: I1121 17:53:15.283293 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_logging-edpm-deployment-openstack-edpm-ipam-6knkp_e8d33040-61f0-4a55-9df6-cfa0b1513c43/logging-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 17:53:15 crc kubenswrapper[4967]: I1121 17:53:15.665376 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mysqld-exporter-0_b6c61443-128e-48f7-9753-0283a3a7d3ba/mysqld-exporter/0.log" Nov 21 17:53:15 crc kubenswrapper[4967]: I1121 17:53:15.955528 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-xwbbv_492f2f9f-3f85-4fdd-a247-15d403c3bb87/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 17:53:16 crc kubenswrapper[4967]: I1121 17:53:16.105998 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5f8c4b98b5-tmhs4_44bed227-df87-4bda-8b89-2d54dc5735a4/neutron-httpd/0.log" Nov 21 17:53:16 crc kubenswrapper[4967]: I1121 17:53:16.137403 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5f8c4b98b5-tmhs4_44bed227-df87-4bda-8b89-2d54dc5735a4/neutron-api/0.log" Nov 21 17:53:17 crc kubenswrapper[4967]: I1121 17:53:17.022440 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_f3996bc6-3f27-4ff9-bcc8-688a7ffb6991/nova-cell0-conductor-conductor/0.log" Nov 21 17:53:17 crc kubenswrapper[4967]: I1121 17:53:17.350197 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_4d004044-caa6-4813-9747-e18ca2f2ba9d/nova-cell1-conductor-conductor/0.log" Nov 21 17:53:17 crc kubenswrapper[4967]: I1121 17:53:17.829294 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_2e73aeef-13eb-4c55-9e06-bb56b49c9e5c/nova-cell1-novncproxy-novncproxy/0.log" Nov 21 17:53:17 crc kubenswrapper[4967]: I1121 17:53:17.922994 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-q6jx5_cebe1330-948b-4004-b244-fa4e3e22f1de/nova-edpm-deployment-openstack-edpm-ipam/0.log" 
Nov 21 17:53:18 crc kubenswrapper[4967]: I1121 17:53:18.022614 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_a479d799-9383-4d3a-bdd5-f4987c29d00b/nova-api-log/0.log" Nov 21 17:53:18 crc kubenswrapper[4967]: I1121 17:53:18.332854 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_69ae1faf-e6af-4799-bf8e-3fd8d975235c/nova-metadata-log/0.log" Nov 21 17:53:18 crc kubenswrapper[4967]: I1121 17:53:18.744889 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_15496beb-fac6-4c92-b831-0e553160acd8/nova-scheduler-scheduler/0.log" Nov 21 17:53:18 crc kubenswrapper[4967]: I1121 17:53:18.813912 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_a479d799-9383-4d3a-bdd5-f4987c29d00b/nova-api-api/0.log" Nov 21 17:53:18 crc kubenswrapper[4967]: I1121 17:53:18.916829 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_b3d39ab9-f219-4af5-b82c-102fefaff9bc/mysql-bootstrap/0.log" Nov 21 17:53:19 crc kubenswrapper[4967]: I1121 17:53:19.203368 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_b3d39ab9-f219-4af5-b82c-102fefaff9bc/mysql-bootstrap/0.log" Nov 21 17:53:19 crc kubenswrapper[4967]: I1121 17:53:19.275872 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_b3d39ab9-f219-4af5-b82c-102fefaff9bc/galera/0.log" Nov 21 17:53:19 crc kubenswrapper[4967]: I1121 17:53:19.531822 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_1d7a085e-ccb2-4791-9bdb-e3c564e8b450/mysql-bootstrap/0.log" Nov 21 17:53:19 crc kubenswrapper[4967]: I1121 17:53:19.872190 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_1d7a085e-ccb2-4791-9bdb-e3c564e8b450/mysql-bootstrap/0.log" Nov 21 17:53:19 crc kubenswrapper[4967]: I1121 17:53:19.890926 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_1d7a085e-ccb2-4791-9bdb-e3c564e8b450/galera/0.log" Nov 21 17:53:20 crc kubenswrapper[4967]: I1121 17:53:20.150987 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_4ff8059c-8c40-4326-b477-95c43286eb35/openstackclient/0.log" Nov 21 17:53:20 crc kubenswrapper[4967]: I1121 17:53:20.171152 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-5c2pr_e04788f9-f223-46ef-b96b-24e05c5d911f/ovn-controller/0.log" Nov 21 17:53:20 crc kubenswrapper[4967]: I1121 17:53:20.451486 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-kdj29_77e7b435-56ef-4877-9fd1-cfd83b68209e/openstack-network-exporter/0.log" Nov 21 17:53:20 crc kubenswrapper[4967]: I1121 17:53:20.690690 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-n8r27_83956a70-e80b-424b-9396-8febf34b60ed/ovsdb-server-init/0.log" Nov 21 17:53:20 crc kubenswrapper[4967]: I1121 17:53:20.907116 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-n8r27_83956a70-e80b-424b-9396-8febf34b60ed/ovsdb-server-init/0.log" Nov 21 17:53:20 crc kubenswrapper[4967]: I1121 17:53:20.925622 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-n8r27_83956a70-e80b-424b-9396-8febf34b60ed/ovsdb-server/0.log" Nov 21 17:53:20 crc kubenswrapper[4967]: I1121 17:53:20.953263 4967 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-n8r27_83956a70-e80b-424b-9396-8febf34b60ed/ovs-vswitchd/0.log" Nov 21 17:53:21 crc kubenswrapper[4967]: I1121 17:53:21.161406 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-w5klg_19f7acab-a57b-4fcb-b66b-e988058d14ae/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 17:53:21 crc kubenswrapper[4967]: I1121 17:53:21.824511 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_7d17eb49-4204-4589-82ac-c147f1b7b456/openstack-network-exporter/0.log" Nov 21 17:53:21 crc kubenswrapper[4967]: I1121 17:53:21.884947 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_7d17eb49-4204-4589-82ac-c147f1b7b456/ovn-northd/0.log" Nov 21 17:53:22 crc kubenswrapper[4967]: I1121 17:53:22.051501 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_b0cdc464-b000-47e5-a8d8-0a881ba447c1/openstack-network-exporter/0.log" Nov 21 17:53:22 crc kubenswrapper[4967]: I1121 17:53:22.199277 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_b0cdc464-b000-47e5-a8d8-0a881ba447c1/ovsdbserver-nb/0.log" Nov 21 17:53:22 crc kubenswrapper[4967]: I1121 17:53:22.290849 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_1af05a7c-7c8a-42fa-a520-047cc273227b/openstack-network-exporter/0.log" Nov 21 17:53:22 crc kubenswrapper[4967]: I1121 17:53:22.353876 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_69ae1faf-e6af-4799-bf8e-3fd8d975235c/nova-metadata-metadata/0.log" Nov 21 17:53:22 crc kubenswrapper[4967]: I1121 17:53:22.432737 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_1af05a7c-7c8a-42fa-a520-047cc273227b/ovsdbserver-sb/0.log" Nov 21 17:53:22 crc kubenswrapper[4967]: I1121 17:53:22.821469 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_9877cab6-ed78-4e94-83c9-b2a127e3b7b0/init-config-reloader/0.log" Nov 21 17:53:22 crc kubenswrapper[4967]: I1121 17:53:22.835642 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6d9cf75cd4-wgblt_47bf4cdc-4292-43c3-b9c7-2bb28905204b/placement-api/0.log" Nov 21 17:53:22 crc kubenswrapper[4967]: I1121 17:53:22.864130 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6d9cf75cd4-wgblt_47bf4cdc-4292-43c3-b9c7-2bb28905204b/placement-log/0.log" Nov 21 17:53:23 crc kubenswrapper[4967]: I1121 17:53:23.041822 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_9877cab6-ed78-4e94-83c9-b2a127e3b7b0/config-reloader/0.log" Nov 21 17:53:23 crc kubenswrapper[4967]: I1121 17:53:23.066654 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_9877cab6-ed78-4e94-83c9-b2a127e3b7b0/init-config-reloader/0.log" Nov 21 17:53:23 crc kubenswrapper[4967]: I1121 17:53:23.074409 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_9877cab6-ed78-4e94-83c9-b2a127e3b7b0/thanos-sidecar/0.log" Nov 21 17:53:23 crc kubenswrapper[4967]: I1121 17:53:23.150590 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_9877cab6-ed78-4e94-83c9-b2a127e3b7b0/prometheus/0.log" Nov 21 17:53:23 crc kubenswrapper[4967]: 
I1121 17:53:23.307586 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_2d6d9318-48b9-4b12-9532-2c449dd948a6/setup-container/0.log" Nov 21 17:53:23 crc kubenswrapper[4967]: I1121 17:53:23.514421 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_2d6d9318-48b9-4b12-9532-2c449dd948a6/setup-container/0.log" Nov 21 17:53:23 crc kubenswrapper[4967]: I1121 17:53:23.612954 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_2d6d9318-48b9-4b12-9532-2c449dd948a6/rabbitmq/0.log" Nov 21 17:53:23 crc kubenswrapper[4967]: I1121 17:53:23.721284 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_235ca898-447b-4df0-9aef-3bf2bc1719ce/setup-container/0.log" Nov 21 17:53:23 crc kubenswrapper[4967]: I1121 17:53:23.987615 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_235ca898-447b-4df0-9aef-3bf2bc1719ce/setup-container/0.log" Nov 21 17:53:23 crc kubenswrapper[4967]: I1121 17:53:23.990014 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_235ca898-447b-4df0-9aef-3bf2bc1719ce/rabbitmq/0.log" Nov 21 17:53:24 crc kubenswrapper[4967]: I1121 17:53:24.092339 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-nk7t9_6559f1f0-c99e-49ba-8108-d57a8bf60d33/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 17:53:24 crc kubenswrapper[4967]: I1121 17:53:24.241429 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-xlrwf_4af462cc-74bb-4ef1-bece-cd54d27bb7ef/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 17:53:24 crc kubenswrapper[4967]: I1121 17:53:24.437987 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-wcr8r_cc803d7b-a544-4388-b5c0-3debc0789e8e/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 17:53:24 crc kubenswrapper[4967]: I1121 17:53:24.575188 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-pg7sh_8a6f4649-48ad-45af-90ca-ddf024c34a33/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 17:53:24 crc kubenswrapper[4967]: I1121 17:53:24.757633 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-pv72w_11231449-813f-4d1a-846e-997b1de5349f/ssh-known-hosts-edpm-deployment/0.log" Nov 21 17:53:24 crc kubenswrapper[4967]: I1121 17:53:24.962433 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-794fb7d789-mkxk2_9488c46d-11de-4819-9784-e32e3893a5d9/proxy-server/0.log" Nov 21 17:53:25 crc kubenswrapper[4967]: I1121 17:53:25.084815 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-bf2mc_b1974654-371e-49f8-b8d3-701e31f82b54/swift-ring-rebalance/0.log" Nov 21 17:53:25 crc kubenswrapper[4967]: I1121 17:53:25.230121 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-794fb7d789-mkxk2_9488c46d-11de-4819-9784-e32e3893a5d9/proxy-httpd/0.log" Nov 21 17:53:25 crc kubenswrapper[4967]: I1121 17:53:25.272601 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_4bf5cb6a-c8f8-43c3-b546-282bfd3244e2/account-auditor/0.log" Nov 21 17:53:25 crc kubenswrapper[4967]: I1121 
17:53:25.335265 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_4bf5cb6a-c8f8-43c3-b546-282bfd3244e2/account-reaper/0.log" Nov 21 17:53:25 crc kubenswrapper[4967]: I1121 17:53:25.509108 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_4bf5cb6a-c8f8-43c3-b546-282bfd3244e2/account-server/0.log" Nov 21 17:53:25 crc kubenswrapper[4967]: I1121 17:53:25.610972 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_4bf5cb6a-c8f8-43c3-b546-282bfd3244e2/container-auditor/0.log" Nov 21 17:53:25 crc kubenswrapper[4967]: I1121 17:53:25.620091 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_4bf5cb6a-c8f8-43c3-b546-282bfd3244e2/account-replicator/0.log" Nov 21 17:53:25 crc kubenswrapper[4967]: I1121 17:53:25.945961 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_4bf5cb6a-c8f8-43c3-b546-282bfd3244e2/container-replicator/0.log" Nov 21 17:53:26 crc kubenswrapper[4967]: I1121 17:53:26.093196 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_4bf5cb6a-c8f8-43c3-b546-282bfd3244e2/container-server/0.log" Nov 21 17:53:26 crc kubenswrapper[4967]: I1121 17:53:26.169394 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_4bf5cb6a-c8f8-43c3-b546-282bfd3244e2/container-updater/0.log" Nov 21 17:53:26 crc kubenswrapper[4967]: I1121 17:53:26.241026 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_4bf5cb6a-c8f8-43c3-b546-282bfd3244e2/object-auditor/0.log" Nov 21 17:53:26 crc kubenswrapper[4967]: I1121 17:53:26.280718 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_4bf5cb6a-c8f8-43c3-b546-282bfd3244e2/object-expirer/0.log" Nov 21 17:53:26 crc kubenswrapper[4967]: I1121 17:53:26.426122 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_4bf5cb6a-c8f8-43c3-b546-282bfd3244e2/object-server/0.log" Nov 21 17:53:26 crc kubenswrapper[4967]: I1121 17:53:26.432664 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_4bf5cb6a-c8f8-43c3-b546-282bfd3244e2/object-replicator/0.log" Nov 21 17:53:26 crc kubenswrapper[4967]: I1121 17:53:26.540645 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_4bf5cb6a-c8f8-43c3-b546-282bfd3244e2/rsync/0.log" Nov 21 17:53:26 crc kubenswrapper[4967]: I1121 17:53:26.545679 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_4bf5cb6a-c8f8-43c3-b546-282bfd3244e2/object-updater/0.log" Nov 21 17:53:26 crc kubenswrapper[4967]: I1121 17:53:26.716132 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_4bf5cb6a-c8f8-43c3-b546-282bfd3244e2/swift-recon-cron/0.log" Nov 21 17:53:26 crc kubenswrapper[4967]: I1121 17:53:26.872693 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-6ls69_81fb37a5-540d-440d-b0f7-3ba11bad7c42/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 17:53:27 crc kubenswrapper[4967]: I1121 17:53:27.102364 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-power-monitoring-edpm-deployment-openstack-edpm-j4jm7_5f0377eb-356a-49e8-9919-765ca8e2fb52/telemetry-power-monitoring-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 17:53:27 crc 
kubenswrapper[4967]: I1121 17:53:27.299853 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_2bacaf27-62d6-464e-84ae-0f8e30cf0147/test-operator-logs-container/0.log" Nov 21 17:53:27 crc kubenswrapper[4967]: I1121 17:53:27.523576 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-7vxrc_0abd8e9d-af5c-4b71-884e-03155f8a630a/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 17:53:30 crc kubenswrapper[4967]: I1121 17:53:30.061598 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_e74fdffd-f5c7-4be6-8d37-5d9e07704aaa/tempest-tests-tempest-tests-runner/0.log" Nov 21 17:53:43 crc kubenswrapper[4967]: I1121 17:53:43.582181 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_2af9b421-461a-4411-8a7d-9a0bf5fa8d28/memcached/0.log" Nov 21 17:53:46 crc kubenswrapper[4967]: I1121 17:53:46.522551 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:53:46 crc kubenswrapper[4967]: I1121 17:53:46.523249 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:53:51 crc kubenswrapper[4967]: I1121 17:53:51.367660 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-kqkjr"] Nov 21 17:53:51 crc kubenswrapper[4967]: E1121 17:53:51.370966 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5d2e94a-50f7-4955-a151-f2a3384bd36b" containerName="container-00" Nov 21 17:53:51 crc kubenswrapper[4967]: I1121 17:53:51.371078 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5d2e94a-50f7-4955-a151-f2a3384bd36b" containerName="container-00" Nov 21 17:53:51 crc kubenswrapper[4967]: E1121 17:53:51.371212 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a963506-b742-42a8-89f6-75ddfc3afc01" containerName="extract-content" Nov 21 17:53:51 crc kubenswrapper[4967]: I1121 17:53:51.371287 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a963506-b742-42a8-89f6-75ddfc3afc01" containerName="extract-content" Nov 21 17:53:51 crc kubenswrapper[4967]: E1121 17:53:51.371414 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a963506-b742-42a8-89f6-75ddfc3afc01" containerName="registry-server" Nov 21 17:53:51 crc kubenswrapper[4967]: I1121 17:53:51.371496 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a963506-b742-42a8-89f6-75ddfc3afc01" containerName="registry-server" Nov 21 17:53:51 crc kubenswrapper[4967]: E1121 17:53:51.371594 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a963506-b742-42a8-89f6-75ddfc3afc01" containerName="extract-utilities" Nov 21 17:53:51 crc kubenswrapper[4967]: I1121 17:53:51.371675 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a963506-b742-42a8-89f6-75ddfc3afc01" containerName="extract-utilities" Nov 21 17:53:51 crc kubenswrapper[4967]: I1121 17:53:51.372058 4967 
memory_manager.go:354] "RemoveStaleState removing state" podUID="0a963506-b742-42a8-89f6-75ddfc3afc01" containerName="registry-server" Nov 21 17:53:51 crc kubenswrapper[4967]: I1121 17:53:51.372192 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5d2e94a-50f7-4955-a151-f2a3384bd36b" containerName="container-00" Nov 21 17:53:51 crc kubenswrapper[4967]: I1121 17:53:51.376491 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kqkjr" Nov 21 17:53:51 crc kubenswrapper[4967]: I1121 17:53:51.396005 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kqkjr"] Nov 21 17:53:51 crc kubenswrapper[4967]: I1121 17:53:51.453940 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9ca5420-0420-4255-802a-3dab2c6285ea-utilities\") pod \"community-operators-kqkjr\" (UID: \"a9ca5420-0420-4255-802a-3dab2c6285ea\") " pod="openshift-marketplace/community-operators-kqkjr" Nov 21 17:53:51 crc kubenswrapper[4967]: I1121 17:53:51.454152 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2pnt\" (UniqueName: \"kubernetes.io/projected/a9ca5420-0420-4255-802a-3dab2c6285ea-kube-api-access-b2pnt\") pod \"community-operators-kqkjr\" (UID: \"a9ca5420-0420-4255-802a-3dab2c6285ea\") " pod="openshift-marketplace/community-operators-kqkjr" Nov 21 17:53:51 crc kubenswrapper[4967]: I1121 17:53:51.454219 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9ca5420-0420-4255-802a-3dab2c6285ea-catalog-content\") pod \"community-operators-kqkjr\" (UID: \"a9ca5420-0420-4255-802a-3dab2c6285ea\") " pod="openshift-marketplace/community-operators-kqkjr" Nov 21 17:53:51 crc kubenswrapper[4967]: I1121 17:53:51.557093 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9ca5420-0420-4255-802a-3dab2c6285ea-utilities\") pod \"community-operators-kqkjr\" (UID: \"a9ca5420-0420-4255-802a-3dab2c6285ea\") " pod="openshift-marketplace/community-operators-kqkjr" Nov 21 17:53:51 crc kubenswrapper[4967]: I1121 17:53:51.557258 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2pnt\" (UniqueName: \"kubernetes.io/projected/a9ca5420-0420-4255-802a-3dab2c6285ea-kube-api-access-b2pnt\") pod \"community-operators-kqkjr\" (UID: \"a9ca5420-0420-4255-802a-3dab2c6285ea\") " pod="openshift-marketplace/community-operators-kqkjr" Nov 21 17:53:51 crc kubenswrapper[4967]: I1121 17:53:51.557299 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9ca5420-0420-4255-802a-3dab2c6285ea-catalog-content\") pod \"community-operators-kqkjr\" (UID: \"a9ca5420-0420-4255-802a-3dab2c6285ea\") " pod="openshift-marketplace/community-operators-kqkjr" Nov 21 17:53:51 crc kubenswrapper[4967]: I1121 17:53:51.557672 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9ca5420-0420-4255-802a-3dab2c6285ea-utilities\") pod \"community-operators-kqkjr\" (UID: \"a9ca5420-0420-4255-802a-3dab2c6285ea\") " pod="openshift-marketplace/community-operators-kqkjr" Nov 21 17:53:51 crc 
kubenswrapper[4967]: I1121 17:53:51.557801 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9ca5420-0420-4255-802a-3dab2c6285ea-catalog-content\") pod \"community-operators-kqkjr\" (UID: \"a9ca5420-0420-4255-802a-3dab2c6285ea\") " pod="openshift-marketplace/community-operators-kqkjr" Nov 21 17:53:51 crc kubenswrapper[4967]: I1121 17:53:51.578136 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2pnt\" (UniqueName: \"kubernetes.io/projected/a9ca5420-0420-4255-802a-3dab2c6285ea-kube-api-access-b2pnt\") pod \"community-operators-kqkjr\" (UID: \"a9ca5420-0420-4255-802a-3dab2c6285ea\") " pod="openshift-marketplace/community-operators-kqkjr" Nov 21 17:53:51 crc kubenswrapper[4967]: I1121 17:53:51.706500 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kqkjr" Nov 21 17:53:52 crc kubenswrapper[4967]: I1121 17:53:52.310078 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kqkjr"] Nov 21 17:53:52 crc kubenswrapper[4967]: I1121 17:53:52.823057 4967 generic.go:334] "Generic (PLEG): container finished" podID="a9ca5420-0420-4255-802a-3dab2c6285ea" containerID="31850d53ffd8f665d52fb0c2f42742150c7ebb08b353ab70756782744e63bab9" exitCode=0 Nov 21 17:53:52 crc kubenswrapper[4967]: I1121 17:53:52.823803 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kqkjr" event={"ID":"a9ca5420-0420-4255-802a-3dab2c6285ea","Type":"ContainerDied","Data":"31850d53ffd8f665d52fb0c2f42742150c7ebb08b353ab70756782744e63bab9"} Nov 21 17:53:52 crc kubenswrapper[4967]: I1121 17:53:52.823847 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kqkjr" event={"ID":"a9ca5420-0420-4255-802a-3dab2c6285ea","Type":"ContainerStarted","Data":"efe6700945a20aa313661e7712bced7653251eb42596a5eda21488b9775bcba5"} Nov 21 17:53:54 crc kubenswrapper[4967]: I1121 17:53:54.856801 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kqkjr" event={"ID":"a9ca5420-0420-4255-802a-3dab2c6285ea","Type":"ContainerStarted","Data":"82805113034ab2a493e681c4156dcf09bd936ffd08b056ada96d3ba399237c87"} Nov 21 17:53:55 crc kubenswrapper[4967]: I1121 17:53:55.879234 4967 generic.go:334] "Generic (PLEG): container finished" podID="a9ca5420-0420-4255-802a-3dab2c6285ea" containerID="82805113034ab2a493e681c4156dcf09bd936ffd08b056ada96d3ba399237c87" exitCode=0 Nov 21 17:53:55 crc kubenswrapper[4967]: I1121 17:53:55.879566 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kqkjr" event={"ID":"a9ca5420-0420-4255-802a-3dab2c6285ea","Type":"ContainerDied","Data":"82805113034ab2a493e681c4156dcf09bd936ffd08b056ada96d3ba399237c87"} Nov 21 17:53:56 crc kubenswrapper[4967]: I1121 17:53:56.897090 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kqkjr" event={"ID":"a9ca5420-0420-4255-802a-3dab2c6285ea","Type":"ContainerStarted","Data":"64151db62d8e13db17c80efff8bbfbfd8a49c98b54a88682f1b747ce414abebc"} Nov 21 17:53:56 crc kubenswrapper[4967]: I1121 17:53:56.927662 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-kqkjr" podStartSLOduration=2.374160695 podStartE2EDuration="5.927640269s" podCreationTimestamp="2025-11-21 
17:53:51 +0000 UTC" firstStartedPulling="2025-11-21 17:53:52.833470901 +0000 UTC m=+8321.091991909" lastFinishedPulling="2025-11-21 17:53:56.386950465 +0000 UTC m=+8324.645471483" observedRunningTime="2025-11-21 17:53:56.923988314 +0000 UTC m=+8325.182509322" watchObservedRunningTime="2025-11-21 17:53:56.927640269 +0000 UTC m=+8325.186161277" Nov 21 17:54:01 crc kubenswrapper[4967]: I1121 17:54:01.706670 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-kqkjr" Nov 21 17:54:01 crc kubenswrapper[4967]: I1121 17:54:01.707339 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-kqkjr" Nov 21 17:54:01 crc kubenswrapper[4967]: I1121 17:54:01.764424 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-kqkjr" Nov 21 17:54:02 crc kubenswrapper[4967]: I1121 17:54:02.015518 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-kqkjr" Nov 21 17:54:02 crc kubenswrapper[4967]: I1121 17:54:02.073048 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kqkjr"] Nov 21 17:54:02 crc kubenswrapper[4967]: I1121 17:54:02.204169 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc_721e9c44-afb3-47fb-979b-10e52d1c5acc/util/0.log" Nov 21 17:54:02 crc kubenswrapper[4967]: I1121 17:54:02.430086 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc_721e9c44-afb3-47fb-979b-10e52d1c5acc/util/0.log" Nov 21 17:54:02 crc kubenswrapper[4967]: I1121 17:54:02.481952 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc_721e9c44-afb3-47fb-979b-10e52d1c5acc/pull/0.log" Nov 21 17:54:02 crc kubenswrapper[4967]: I1121 17:54:02.498291 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc_721e9c44-afb3-47fb-979b-10e52d1c5acc/pull/0.log" Nov 21 17:54:02 crc kubenswrapper[4967]: I1121 17:54:02.808004 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc_721e9c44-afb3-47fb-979b-10e52d1c5acc/pull/0.log" Nov 21 17:54:02 crc kubenswrapper[4967]: I1121 17:54:02.844658 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc_721e9c44-afb3-47fb-979b-10e52d1c5acc/extract/0.log" Nov 21 17:54:02 crc kubenswrapper[4967]: I1121 17:54:02.909833 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_82b6106d0a07df371e2e87ba1ca1735beb139e1540c9852292b4813745q4zrc_721e9c44-afb3-47fb-979b-10e52d1c5acc/util/0.log" Nov 21 17:54:03 crc kubenswrapper[4967]: I1121 17:54:03.306258 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-75fb479bcc-pmb82_c8665113-6713-4abd-8d58-66c16f2d678a/manager/0.log" Nov 21 17:54:03 crc kubenswrapper[4967]: I1121 17:54:03.483790 4967 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-75fb479bcc-pmb82_c8665113-6713-4abd-8d58-66c16f2d678a/kube-rbac-proxy/0.log" Nov 21 17:54:03 crc kubenswrapper[4967]: I1121 17:54:03.785608 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6498cbf48f-65jcd_d1b75c6e-f666-4046-8f22-9a6fd96f9442/kube-rbac-proxy/0.log" Nov 21 17:54:03 crc kubenswrapper[4967]: I1121 17:54:03.907470 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6498cbf48f-65jcd_d1b75c6e-f666-4046-8f22-9a6fd96f9442/manager/0.log" Nov 21 17:54:03 crc kubenswrapper[4967]: I1121 17:54:03.983229 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-kqkjr" podUID="a9ca5420-0420-4255-802a-3dab2c6285ea" containerName="registry-server" containerID="cri-o://64151db62d8e13db17c80efff8bbfbfd8a49c98b54a88682f1b747ce414abebc" gracePeriod=2 Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.046833 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-767ccfd65f-c8rct_8853dca4-97bf-4b91-9523-a383122bd470/kube-rbac-proxy/0.log" Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.069399 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-767ccfd65f-c8rct_8853dca4-97bf-4b91-9523-a383122bd470/manager/0.log" Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.269702 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7969689c84-lrlvb_680de92b-f127-4cb3-86c4-3e4b9ae183df/kube-rbac-proxy/0.log" Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.473625 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7969689c84-lrlvb_680de92b-f127-4cb3-86c4-3e4b9ae183df/manager/0.log" Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.579630 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-56f54d6746-bxhgh_fcb67210-f9d3-483b-aa07-6f332130450c/kube-rbac-proxy/0.log" Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.656971 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-56f54d6746-bxhgh_fcb67210-f9d3-483b-aa07-6f332130450c/manager/0.log" Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.732146 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kqkjr" Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.798492 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-598f69df5d-dvhqb_bb306581-4364-431e-866d-49a92b74eab5/kube-rbac-proxy/0.log" Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.836748 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-598f69df5d-dvhqb_bb306581-4364-431e-866d-49a92b74eab5/manager/0.log" Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.905169 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9ca5420-0420-4255-802a-3dab2c6285ea-catalog-content\") pod \"a9ca5420-0420-4255-802a-3dab2c6285ea\" (UID: \"a9ca5420-0420-4255-802a-3dab2c6285ea\") " Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.905470 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b2pnt\" (UniqueName: \"kubernetes.io/projected/a9ca5420-0420-4255-802a-3dab2c6285ea-kube-api-access-b2pnt\") pod \"a9ca5420-0420-4255-802a-3dab2c6285ea\" (UID: \"a9ca5420-0420-4255-802a-3dab2c6285ea\") " Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.905670 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9ca5420-0420-4255-802a-3dab2c6285ea-utilities\") pod \"a9ca5420-0420-4255-802a-3dab2c6285ea\" (UID: \"a9ca5420-0420-4255-802a-3dab2c6285ea\") " Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.906521 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9ca5420-0420-4255-802a-3dab2c6285ea-utilities" (OuterVolumeSpecName: "utilities") pod "a9ca5420-0420-4255-802a-3dab2c6285ea" (UID: "a9ca5420-0420-4255-802a-3dab2c6285ea"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.907745 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9ca5420-0420-4255-802a-3dab2c6285ea-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.914636 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9ca5420-0420-4255-802a-3dab2c6285ea-kube-api-access-b2pnt" (OuterVolumeSpecName: "kube-api-access-b2pnt") pod "a9ca5420-0420-4255-802a-3dab2c6285ea" (UID: "a9ca5420-0420-4255-802a-3dab2c6285ea"). InnerVolumeSpecName "kube-api-access-b2pnt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.967486 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9ca5420-0420-4255-802a-3dab2c6285ea-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a9ca5420-0420-4255-802a-3dab2c6285ea" (UID: "a9ca5420-0420-4255-802a-3dab2c6285ea"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.996101 4967 generic.go:334] "Generic (PLEG): container finished" podID="a9ca5420-0420-4255-802a-3dab2c6285ea" containerID="64151db62d8e13db17c80efff8bbfbfd8a49c98b54a88682f1b747ce414abebc" exitCode=0 Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.996151 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kqkjr" event={"ID":"a9ca5420-0420-4255-802a-3dab2c6285ea","Type":"ContainerDied","Data":"64151db62d8e13db17c80efff8bbfbfd8a49c98b54a88682f1b747ce414abebc"} Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.996184 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kqkjr" event={"ID":"a9ca5420-0420-4255-802a-3dab2c6285ea","Type":"ContainerDied","Data":"efe6700945a20aa313661e7712bced7653251eb42596a5eda21488b9775bcba5"} Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.996207 4967 scope.go:117] "RemoveContainer" containerID="64151db62d8e13db17c80efff8bbfbfd8a49c98b54a88682f1b747ce414abebc" Nov 21 17:54:04 crc kubenswrapper[4967]: I1121 17:54:04.996385 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kqkjr" Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.010856 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9ca5420-0420-4255-802a-3dab2c6285ea-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.010910 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b2pnt\" (UniqueName: \"kubernetes.io/projected/a9ca5420-0420-4255-802a-3dab2c6285ea-kube-api-access-b2pnt\") on node \"crc\" DevicePath \"\"" Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.039380 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kqkjr"] Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.051806 4967 scope.go:117] "RemoveContainer" containerID="82805113034ab2a493e681c4156dcf09bd936ffd08b056ada96d3ba399237c87" Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.052989 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-kqkjr"] Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.096815 4967 scope.go:117] "RemoveContainer" containerID="31850d53ffd8f665d52fb0c2f42742150c7ebb08b353ab70756782744e63bab9" Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.137688 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6dd8864d7c-9rqgf_ffe49522-20f7-4f17-9209-a782306baf71/kube-rbac-proxy/0.log" Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.148680 4967 scope.go:117] "RemoveContainer" containerID="64151db62d8e13db17c80efff8bbfbfd8a49c98b54a88682f1b747ce414abebc" Nov 21 17:54:05 crc kubenswrapper[4967]: E1121 17:54:05.149396 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64151db62d8e13db17c80efff8bbfbfd8a49c98b54a88682f1b747ce414abebc\": container with ID starting with 64151db62d8e13db17c80efff8bbfbfd8a49c98b54a88682f1b747ce414abebc not found: ID does not exist" containerID="64151db62d8e13db17c80efff8bbfbfd8a49c98b54a88682f1b747ce414abebc" Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.149455 
4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64151db62d8e13db17c80efff8bbfbfd8a49c98b54a88682f1b747ce414abebc"} err="failed to get container status \"64151db62d8e13db17c80efff8bbfbfd8a49c98b54a88682f1b747ce414abebc\": rpc error: code = NotFound desc = could not find container \"64151db62d8e13db17c80efff8bbfbfd8a49c98b54a88682f1b747ce414abebc\": container with ID starting with 64151db62d8e13db17c80efff8bbfbfd8a49c98b54a88682f1b747ce414abebc not found: ID does not exist" Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.149488 4967 scope.go:117] "RemoveContainer" containerID="82805113034ab2a493e681c4156dcf09bd936ffd08b056ada96d3ba399237c87" Nov 21 17:54:05 crc kubenswrapper[4967]: E1121 17:54:05.149947 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82805113034ab2a493e681c4156dcf09bd936ffd08b056ada96d3ba399237c87\": container with ID starting with 82805113034ab2a493e681c4156dcf09bd936ffd08b056ada96d3ba399237c87 not found: ID does not exist" containerID="82805113034ab2a493e681c4156dcf09bd936ffd08b056ada96d3ba399237c87" Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.150008 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82805113034ab2a493e681c4156dcf09bd936ffd08b056ada96d3ba399237c87"} err="failed to get container status \"82805113034ab2a493e681c4156dcf09bd936ffd08b056ada96d3ba399237c87\": rpc error: code = NotFound desc = could not find container \"82805113034ab2a493e681c4156dcf09bd936ffd08b056ada96d3ba399237c87\": container with ID starting with 82805113034ab2a493e681c4156dcf09bd936ffd08b056ada96d3ba399237c87 not found: ID does not exist" Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.150057 4967 scope.go:117] "RemoveContainer" containerID="31850d53ffd8f665d52fb0c2f42742150c7ebb08b353ab70756782744e63bab9" Nov 21 17:54:05 crc kubenswrapper[4967]: E1121 17:54:05.150915 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31850d53ffd8f665d52fb0c2f42742150c7ebb08b353ab70756782744e63bab9\": container with ID starting with 31850d53ffd8f665d52fb0c2f42742150c7ebb08b353ab70756782744e63bab9 not found: ID does not exist" containerID="31850d53ffd8f665d52fb0c2f42742150c7ebb08b353ab70756782744e63bab9" Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.150946 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31850d53ffd8f665d52fb0c2f42742150c7ebb08b353ab70756782744e63bab9"} err="failed to get container status \"31850d53ffd8f665d52fb0c2f42742150c7ebb08b353ab70756782744e63bab9\": rpc error: code = NotFound desc = could not find container \"31850d53ffd8f665d52fb0c2f42742150c7ebb08b353ab70756782744e63bab9\": container with ID starting with 31850d53ffd8f665d52fb0c2f42742150c7ebb08b353ab70756782744e63bab9 not found: ID does not exist" Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.316149 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-99b499f4-44jql_9782f058-db28-4c8b-b1b7-ee270c4d76b4/kube-rbac-proxy/0.log" Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.322771 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6dd8864d7c-9rqgf_ffe49522-20f7-4f17-9209-a782306baf71/manager/0.log" Nov 21 17:54:05 crc kubenswrapper[4967]: 
I1121 17:54:05.395842 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-99b499f4-44jql_9782f058-db28-4c8b-b1b7-ee270c4d76b4/manager/0.log" Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.590851 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7454b96578-kvg22_5d919036-74e8-4637-b93e-fefc337cf51a/manager/0.log" Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.606448 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7454b96578-kvg22_5d919036-74e8-4637-b93e-fefc337cf51a/kube-rbac-proxy/0.log" Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.660982 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58f887965d-9m8rh_43bebf9f-0691-416d-91e2-232a3a4d37d8/kube-rbac-proxy/0.log" Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.877196 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58f887965d-9m8rh_43bebf9f-0691-416d-91e2-232a3a4d37d8/manager/0.log" Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.892851 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-54b5986bb8-xjrxl_95150f6f-2cf8-490f-a9fe-c01038ca1807/manager/0.log" Nov 21 17:54:05 crc kubenswrapper[4967]: I1121 17:54:05.908885 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-54b5986bb8-xjrxl_95150f6f-2cf8-490f-a9fe-c01038ca1807/kube-rbac-proxy/0.log" Nov 21 17:54:06 crc kubenswrapper[4967]: I1121 17:54:06.086752 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-78bd47f458-dcz62_0b76a724-2c3b-47e1-a6bd-daada9e96cea/kube-rbac-proxy/0.log" Nov 21 17:54:06 crc kubenswrapper[4967]: I1121 17:54:06.222026 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-78bd47f458-dcz62_0b76a724-2c3b-47e1-a6bd-daada9e96cea/manager/0.log" Nov 21 17:54:06 crc kubenswrapper[4967]: I1121 17:54:06.260404 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-cfbb9c588-jt8hb_7f1b9439-5d7e-462e-b4ce-2cfa70363101/kube-rbac-proxy/0.log" Nov 21 17:54:06 crc kubenswrapper[4967]: I1121 17:54:06.400290 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-cfbb9c588-jt8hb_7f1b9439-5d7e-462e-b4ce-2cfa70363101/manager/0.log" Nov 21 17:54:06 crc kubenswrapper[4967]: I1121 17:54:06.487994 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-54cfbf4c7d-qjbwj_8d84a26e-2d5b-41e3-95fa-60e2bdc67b0d/kube-rbac-proxy/0.log" Nov 21 17:54:06 crc kubenswrapper[4967]: I1121 17:54:06.515501 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-54cfbf4c7d-qjbwj_8d84a26e-2d5b-41e3-95fa-60e2bdc67b0d/manager/0.log" Nov 21 17:54:06 crc kubenswrapper[4967]: I1121 17:54:06.550010 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9ca5420-0420-4255-802a-3dab2c6285ea" path="/var/lib/kubelet/pods/a9ca5420-0420-4255-802a-3dab2c6285ea/volumes" Nov 21 17:54:06 crc 
kubenswrapper[4967]: I1121 17:54:06.606145 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-8c7444f48-xq76g_26d40ea4-3e61-4911-b5d4-a87a06b6698e/kube-rbac-proxy/0.log" Nov 21 17:54:06 crc kubenswrapper[4967]: I1121 17:54:06.721603 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-8c7444f48-xq76g_26d40ea4-3e61-4911-b5d4-a87a06b6698e/manager/0.log" Nov 21 17:54:06 crc kubenswrapper[4967]: I1121 17:54:06.758586 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-648ff6d765-v2pj5_cb33f2a5-e4b0-4ebf-9ddb-03979139e785/kube-rbac-proxy/0.log" Nov 21 17:54:07 crc kubenswrapper[4967]: I1121 17:54:07.233486 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5c6987f5c-tljdg_073aff39-0ebc-4283-9a05-ec6b8e0abbd5/kube-rbac-proxy/0.log" Nov 21 17:54:07 crc kubenswrapper[4967]: I1121 17:54:07.403394 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-bq7fn_f5c26992-d25c-48e3-97f2-4260d4489c53/registry-server/0.log" Nov 21 17:54:07 crc kubenswrapper[4967]: I1121 17:54:07.408510 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5c6987f5c-tljdg_073aff39-0ebc-4283-9a05-ec6b8e0abbd5/operator/0.log" Nov 21 17:54:07 crc kubenswrapper[4967]: I1121 17:54:07.506532 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-54fc5f65b7-jsj2l_dce06b30-88dd-4beb-b4cb-7982ed0a8000/kube-rbac-proxy/0.log" Nov 21 17:54:07 crc kubenswrapper[4967]: I1121 17:54:07.704083 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-54fc5f65b7-jsj2l_dce06b30-88dd-4beb-b4cb-7982ed0a8000/manager/0.log" Nov 21 17:54:07 crc kubenswrapper[4967]: I1121 17:54:07.834906 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b797b8dff-b8f9d_e8ef978f-0513-4008-a8f5-07c52a1979bb/manager/0.log" Nov 21 17:54:07 crc kubenswrapper[4967]: I1121 17:54:07.846005 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b797b8dff-b8f9d_e8ef978f-0513-4008-a8f5-07c52a1979bb/kube-rbac-proxy/0.log" Nov 21 17:54:08 crc kubenswrapper[4967]: I1121 17:54:08.015476 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-mm6fj_e107a04d-5715-481d-94d7-b99ad7f3e95d/operator/0.log" Nov 21 17:54:08 crc kubenswrapper[4967]: I1121 17:54:08.176624 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d656998f4-f78mk_51e2d793-61c7-4587-ac51-fb644591ef74/kube-rbac-proxy/0.log" Nov 21 17:54:08 crc kubenswrapper[4967]: I1121 17:54:08.383191 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d656998f4-f78mk_51e2d793-61c7-4587-ac51-fb644591ef74/manager/0.log" Nov 21 17:54:08 crc kubenswrapper[4967]: I1121 17:54:08.425446 4967 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-654d9964b7-j7n2g_fba0599a-65d9-4254-b118-6527649ffb1e/kube-rbac-proxy/0.log" Nov 21 17:54:08 crc kubenswrapper[4967]: I1121 17:54:08.529090 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-648ff6d765-v2pj5_cb33f2a5-e4b0-4ebf-9ddb-03979139e785/manager/0.log" Nov 21 17:54:08 crc kubenswrapper[4967]: I1121 17:54:08.792216 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-654d9964b7-j7n2g_fba0599a-65d9-4254-b118-6527649ffb1e/manager/0.log" Nov 21 17:54:08 crc kubenswrapper[4967]: I1121 17:54:08.906717 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-b4c496f69-wr7n5_cc2f0cb7-e6eb-41ab-a809-ff5bf52668a1/kube-rbac-proxy/0.log" Nov 21 17:54:09 crc kubenswrapper[4967]: I1121 17:54:09.114181 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-b4c496f69-wr7n5_cc2f0cb7-e6eb-41ab-a809-ff5bf52668a1/manager/0.log" Nov 21 17:54:09 crc kubenswrapper[4967]: I1121 17:54:09.229639 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-8c6448b9f-qh6wx_39a393cc-d7e3-4d00-89a2-7fbfaea1e6d4/kube-rbac-proxy/0.log" Nov 21 17:54:09 crc kubenswrapper[4967]: I1121 17:54:09.241720 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-8c6448b9f-qh6wx_39a393cc-d7e3-4d00-89a2-7fbfaea1e6d4/manager/0.log" Nov 21 17:54:16 crc kubenswrapper[4967]: I1121 17:54:16.522477 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:54:16 crc kubenswrapper[4967]: I1121 17:54:16.522975 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:54:30 crc kubenswrapper[4967]: I1121 17:54:30.852217 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-tz287_5c2198a6-561a-407b-979b-67d05acfb234/control-plane-machine-set-operator/0.log" Nov 21 17:54:31 crc kubenswrapper[4967]: I1121 17:54:31.088904 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-882pz_70fb4095-863d-445d-bc3a-bdb264c4abc1/kube-rbac-proxy/0.log" Nov 21 17:54:31 crc kubenswrapper[4967]: I1121 17:54:31.185182 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-882pz_70fb4095-863d-445d-bc3a-bdb264c4abc1/machine-api-operator/0.log" Nov 21 17:54:36 crc kubenswrapper[4967]: I1121 17:54:36.748620 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-787hc"] Nov 21 17:54:36 crc kubenswrapper[4967]: E1121 17:54:36.750078 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9ca5420-0420-4255-802a-3dab2c6285ea" 
containerName="registry-server" Nov 21 17:54:36 crc kubenswrapper[4967]: I1121 17:54:36.750097 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9ca5420-0420-4255-802a-3dab2c6285ea" containerName="registry-server" Nov 21 17:54:36 crc kubenswrapper[4967]: E1121 17:54:36.750120 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9ca5420-0420-4255-802a-3dab2c6285ea" containerName="extract-content" Nov 21 17:54:36 crc kubenswrapper[4967]: I1121 17:54:36.750129 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9ca5420-0420-4255-802a-3dab2c6285ea" containerName="extract-content" Nov 21 17:54:36 crc kubenswrapper[4967]: E1121 17:54:36.750144 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9ca5420-0420-4255-802a-3dab2c6285ea" containerName="extract-utilities" Nov 21 17:54:36 crc kubenswrapper[4967]: I1121 17:54:36.750152 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9ca5420-0420-4255-802a-3dab2c6285ea" containerName="extract-utilities" Nov 21 17:54:36 crc kubenswrapper[4967]: I1121 17:54:36.750497 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9ca5420-0420-4255-802a-3dab2c6285ea" containerName="registry-server" Nov 21 17:54:36 crc kubenswrapper[4967]: I1121 17:54:36.755821 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-787hc" Nov 21 17:54:36 crc kubenswrapper[4967]: I1121 17:54:36.765768 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-787hc"] Nov 21 17:54:36 crc kubenswrapper[4967]: I1121 17:54:36.858500 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f95310d7-bcc1-46a9-b2f4-dc54f621dafc-catalog-content\") pod \"redhat-operators-787hc\" (UID: \"f95310d7-bcc1-46a9-b2f4-dc54f621dafc\") " pod="openshift-marketplace/redhat-operators-787hc" Nov 21 17:54:36 crc kubenswrapper[4967]: I1121 17:54:36.858590 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f95310d7-bcc1-46a9-b2f4-dc54f621dafc-utilities\") pod \"redhat-operators-787hc\" (UID: \"f95310d7-bcc1-46a9-b2f4-dc54f621dafc\") " pod="openshift-marketplace/redhat-operators-787hc" Nov 21 17:54:36 crc kubenswrapper[4967]: I1121 17:54:36.858698 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnmbp\" (UniqueName: \"kubernetes.io/projected/f95310d7-bcc1-46a9-b2f4-dc54f621dafc-kube-api-access-cnmbp\") pod \"redhat-operators-787hc\" (UID: \"f95310d7-bcc1-46a9-b2f4-dc54f621dafc\") " pod="openshift-marketplace/redhat-operators-787hc" Nov 21 17:54:36 crc kubenswrapper[4967]: I1121 17:54:36.959721 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f95310d7-bcc1-46a9-b2f4-dc54f621dafc-catalog-content\") pod \"redhat-operators-787hc\" (UID: \"f95310d7-bcc1-46a9-b2f4-dc54f621dafc\") " pod="openshift-marketplace/redhat-operators-787hc" Nov 21 17:54:36 crc kubenswrapper[4967]: I1121 17:54:36.959813 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f95310d7-bcc1-46a9-b2f4-dc54f621dafc-utilities\") pod \"redhat-operators-787hc\" (UID: \"f95310d7-bcc1-46a9-b2f4-dc54f621dafc\") " 
pod="openshift-marketplace/redhat-operators-787hc" Nov 21 17:54:36 crc kubenswrapper[4967]: I1121 17:54:36.959888 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnmbp\" (UniqueName: \"kubernetes.io/projected/f95310d7-bcc1-46a9-b2f4-dc54f621dafc-kube-api-access-cnmbp\") pod \"redhat-operators-787hc\" (UID: \"f95310d7-bcc1-46a9-b2f4-dc54f621dafc\") " pod="openshift-marketplace/redhat-operators-787hc" Nov 21 17:54:36 crc kubenswrapper[4967]: I1121 17:54:36.960468 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f95310d7-bcc1-46a9-b2f4-dc54f621dafc-catalog-content\") pod \"redhat-operators-787hc\" (UID: \"f95310d7-bcc1-46a9-b2f4-dc54f621dafc\") " pod="openshift-marketplace/redhat-operators-787hc" Nov 21 17:54:36 crc kubenswrapper[4967]: I1121 17:54:36.960468 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f95310d7-bcc1-46a9-b2f4-dc54f621dafc-utilities\") pod \"redhat-operators-787hc\" (UID: \"f95310d7-bcc1-46a9-b2f4-dc54f621dafc\") " pod="openshift-marketplace/redhat-operators-787hc" Nov 21 17:54:36 crc kubenswrapper[4967]: I1121 17:54:36.990963 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnmbp\" (UniqueName: \"kubernetes.io/projected/f95310d7-bcc1-46a9-b2f4-dc54f621dafc-kube-api-access-cnmbp\") pod \"redhat-operators-787hc\" (UID: \"f95310d7-bcc1-46a9-b2f4-dc54f621dafc\") " pod="openshift-marketplace/redhat-operators-787hc" Nov 21 17:54:37 crc kubenswrapper[4967]: I1121 17:54:37.092674 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-787hc" Nov 21 17:54:37 crc kubenswrapper[4967]: I1121 17:54:37.631005 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-787hc"] Nov 21 17:54:38 crc kubenswrapper[4967]: I1121 17:54:38.450365 4967 generic.go:334] "Generic (PLEG): container finished" podID="f95310d7-bcc1-46a9-b2f4-dc54f621dafc" containerID="95e31b15898774b096c4e9653b81346235d49f6e38529c9a586e1896458dddf0" exitCode=0 Nov 21 17:54:38 crc kubenswrapper[4967]: I1121 17:54:38.450464 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-787hc" event={"ID":"f95310d7-bcc1-46a9-b2f4-dc54f621dafc","Type":"ContainerDied","Data":"95e31b15898774b096c4e9653b81346235d49f6e38529c9a586e1896458dddf0"} Nov 21 17:54:38 crc kubenswrapper[4967]: I1121 17:54:38.450812 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-787hc" event={"ID":"f95310d7-bcc1-46a9-b2f4-dc54f621dafc","Type":"ContainerStarted","Data":"ff6f908e80e577889e938e1ce5baa6b4bf7cd89d728f68bd99a5dd2cf33bd904"} Nov 21 17:54:40 crc kubenswrapper[4967]: I1121 17:54:40.497198 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-787hc" event={"ID":"f95310d7-bcc1-46a9-b2f4-dc54f621dafc","Type":"ContainerStarted","Data":"d438a57692c1edf218d8147ce8fd80352dfc2f08dc154f209d8e3ad40bdfef9e"} Nov 21 17:54:45 crc kubenswrapper[4967]: I1121 17:54:45.556414 4967 generic.go:334] "Generic (PLEG): container finished" podID="f95310d7-bcc1-46a9-b2f4-dc54f621dafc" containerID="d438a57692c1edf218d8147ce8fd80352dfc2f08dc154f209d8e3ad40bdfef9e" exitCode=0 Nov 21 17:54:45 crc kubenswrapper[4967]: I1121 17:54:45.556817 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-787hc" event={"ID":"f95310d7-bcc1-46a9-b2f4-dc54f621dafc","Type":"ContainerDied","Data":"d438a57692c1edf218d8147ce8fd80352dfc2f08dc154f209d8e3ad40bdfef9e"} Nov 21 17:54:46 crc kubenswrapper[4967]: I1121 17:54:46.522080 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:54:46 crc kubenswrapper[4967]: I1121 17:54:46.522998 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:54:46 crc kubenswrapper[4967]: I1121 17:54:46.523065 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 17:54:46 crc kubenswrapper[4967]: I1121 17:54:46.525101 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7f557661ba6f6037ba393d7f4791869dbf4276d289589af753d8dc1537d3b2c4"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 17:54:46 crc kubenswrapper[4967]: I1121 17:54:46.525181 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://7f557661ba6f6037ba393d7f4791869dbf4276d289589af753d8dc1537d3b2c4" gracePeriod=600 Nov 21 17:54:46 crc kubenswrapper[4967]: I1121 17:54:46.954529 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-pwx86_cde8cf1b-1296-410b-82aa-a657c4118292/cert-manager-controller/0.log" Nov 21 17:54:47 crc kubenswrapper[4967]: I1121 17:54:47.211258 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-nv4pt_2a0467f9-ffdb-41c1-9bdc-02224075b4f3/cert-manager-cainjector/0.log" Nov 21 17:54:47 crc kubenswrapper[4967]: I1121 17:54:47.296130 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-d4z6x_f3fc7f11-f784-425a-a74a-c31e2be86970/cert-manager-webhook/0.log" Nov 21 17:54:47 crc kubenswrapper[4967]: I1121 17:54:47.615074 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-787hc" event={"ID":"f95310d7-bcc1-46a9-b2f4-dc54f621dafc","Type":"ContainerStarted","Data":"3f02c54d78aafd4659cbca50c595be23151c89e558c85e87fa55f32607e94a1e"} Nov 21 17:54:47 crc kubenswrapper[4967]: I1121 17:54:47.626162 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="7f557661ba6f6037ba393d7f4791869dbf4276d289589af753d8dc1537d3b2c4" exitCode=0 Nov 21 17:54:47 crc kubenswrapper[4967]: I1121 17:54:47.626217 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" 
event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"7f557661ba6f6037ba393d7f4791869dbf4276d289589af753d8dc1537d3b2c4"} Nov 21 17:54:47 crc kubenswrapper[4967]: I1121 17:54:47.626253 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1"} Nov 21 17:54:47 crc kubenswrapper[4967]: I1121 17:54:47.626272 4967 scope.go:117] "RemoveContainer" containerID="5edbd484613453019a73f6c8fe8df725d5ab8c6e9edcffcb70cb9e083bf30324" Nov 21 17:54:47 crc kubenswrapper[4967]: I1121 17:54:47.648055 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-787hc" podStartSLOduration=3.765263152 podStartE2EDuration="11.648005768s" podCreationTimestamp="2025-11-21 17:54:36 +0000 UTC" firstStartedPulling="2025-11-21 17:54:38.453111976 +0000 UTC m=+8366.711632974" lastFinishedPulling="2025-11-21 17:54:46.335854582 +0000 UTC m=+8374.594375590" observedRunningTime="2025-11-21 17:54:47.637139937 +0000 UTC m=+8375.895660945" watchObservedRunningTime="2025-11-21 17:54:47.648005768 +0000 UTC m=+8375.906526776" Nov 21 17:54:57 crc kubenswrapper[4967]: I1121 17:54:57.092914 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-787hc" Nov 21 17:54:57 crc kubenswrapper[4967]: I1121 17:54:57.093969 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-787hc" Nov 21 17:54:58 crc kubenswrapper[4967]: I1121 17:54:58.178294 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-787hc" podUID="f95310d7-bcc1-46a9-b2f4-dc54f621dafc" containerName="registry-server" probeResult="failure" output=< Nov 21 17:54:58 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 17:54:58 crc kubenswrapper[4967]: > Nov 21 17:55:03 crc kubenswrapper[4967]: I1121 17:55:03.420557 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-cd9cs_2a407648-607f-4dc4-a6b6-7ae364ae228b/nmstate-console-plugin/0.log" Nov 21 17:55:03 crc kubenswrapper[4967]: I1121 17:55:03.849227 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-krp57_b669d096-a783-4c35-9bd5-a489346af9d8/nmstate-handler/0.log" Nov 21 17:55:03 crc kubenswrapper[4967]: I1121 17:55:03.974466 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-fgnlx_8eecf12e-205c-45b3-8be3-84dd5d0c6803/kube-rbac-proxy/0.log" Nov 21 17:55:04 crc kubenswrapper[4967]: I1121 17:55:04.102729 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-fgnlx_8eecf12e-205c-45b3-8be3-84dd5d0c6803/nmstate-metrics/0.log" Nov 21 17:55:04 crc kubenswrapper[4967]: I1121 17:55:04.277213 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-sv9h6_d478a29a-6695-471a-b25d-d5c34c6cd916/nmstate-operator/0.log" Nov 21 17:55:04 crc kubenswrapper[4967]: I1121 17:55:04.337451 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-v5hpv_c50f20c0-a383-4f6b-bfb2-407d0311697e/nmstate-webhook/0.log" Nov 21 17:55:08 crc 
kubenswrapper[4967]: I1121 17:55:08.158393 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-787hc" podUID="f95310d7-bcc1-46a9-b2f4-dc54f621dafc" containerName="registry-server" probeResult="failure" output=< Nov 21 17:55:08 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 17:55:08 crc kubenswrapper[4967]: > Nov 21 17:55:17 crc kubenswrapper[4967]: I1121 17:55:17.148842 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-787hc" Nov 21 17:55:17 crc kubenswrapper[4967]: I1121 17:55:17.213078 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-787hc" Nov 21 17:55:17 crc kubenswrapper[4967]: I1121 17:55:17.396581 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-787hc"] Nov 21 17:55:18 crc kubenswrapper[4967]: I1121 17:55:18.379590 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-77f9f48c4d-942m7_cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7/kube-rbac-proxy/0.log" Nov 21 17:55:18 crc kubenswrapper[4967]: I1121 17:55:18.393958 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-77f9f48c4d-942m7_cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7/manager/0.log" Nov 21 17:55:19 crc kubenswrapper[4967]: I1121 17:55:19.064215 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-787hc" podUID="f95310d7-bcc1-46a9-b2f4-dc54f621dafc" containerName="registry-server" containerID="cri-o://3f02c54d78aafd4659cbca50c595be23151c89e558c85e87fa55f32607e94a1e" gracePeriod=2 Nov 21 17:55:19 crc kubenswrapper[4967]: I1121 17:55:19.676700 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-787hc" Nov 21 17:55:19 crc kubenswrapper[4967]: I1121 17:55:19.844872 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cnmbp\" (UniqueName: \"kubernetes.io/projected/f95310d7-bcc1-46a9-b2f4-dc54f621dafc-kube-api-access-cnmbp\") pod \"f95310d7-bcc1-46a9-b2f4-dc54f621dafc\" (UID: \"f95310d7-bcc1-46a9-b2f4-dc54f621dafc\") " Nov 21 17:55:19 crc kubenswrapper[4967]: I1121 17:55:19.845211 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f95310d7-bcc1-46a9-b2f4-dc54f621dafc-catalog-content\") pod \"f95310d7-bcc1-46a9-b2f4-dc54f621dafc\" (UID: \"f95310d7-bcc1-46a9-b2f4-dc54f621dafc\") " Nov 21 17:55:19 crc kubenswrapper[4967]: I1121 17:55:19.845467 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f95310d7-bcc1-46a9-b2f4-dc54f621dafc-utilities\") pod \"f95310d7-bcc1-46a9-b2f4-dc54f621dafc\" (UID: \"f95310d7-bcc1-46a9-b2f4-dc54f621dafc\") " Nov 21 17:55:19 crc kubenswrapper[4967]: I1121 17:55:19.846012 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f95310d7-bcc1-46a9-b2f4-dc54f621dafc-utilities" (OuterVolumeSpecName: "utilities") pod "f95310d7-bcc1-46a9-b2f4-dc54f621dafc" (UID: "f95310d7-bcc1-46a9-b2f4-dc54f621dafc"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:55:19 crc kubenswrapper[4967]: I1121 17:55:19.846304 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f95310d7-bcc1-46a9-b2f4-dc54f621dafc-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 17:55:19 crc kubenswrapper[4967]: I1121 17:55:19.853201 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f95310d7-bcc1-46a9-b2f4-dc54f621dafc-kube-api-access-cnmbp" (OuterVolumeSpecName: "kube-api-access-cnmbp") pod "f95310d7-bcc1-46a9-b2f4-dc54f621dafc" (UID: "f95310d7-bcc1-46a9-b2f4-dc54f621dafc"). InnerVolumeSpecName "kube-api-access-cnmbp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:55:19 crc kubenswrapper[4967]: I1121 17:55:19.952190 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cnmbp\" (UniqueName: \"kubernetes.io/projected/f95310d7-bcc1-46a9-b2f4-dc54f621dafc-kube-api-access-cnmbp\") on node \"crc\" DevicePath \"\"" Nov 21 17:55:19 crc kubenswrapper[4967]: I1121 17:55:19.966745 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f95310d7-bcc1-46a9-b2f4-dc54f621dafc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f95310d7-bcc1-46a9-b2f4-dc54f621dafc" (UID: "f95310d7-bcc1-46a9-b2f4-dc54f621dafc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:55:20 crc kubenswrapper[4967]: I1121 17:55:20.055563 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f95310d7-bcc1-46a9-b2f4-dc54f621dafc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 17:55:20 crc kubenswrapper[4967]: I1121 17:55:20.077884 4967 generic.go:334] "Generic (PLEG): container finished" podID="f95310d7-bcc1-46a9-b2f4-dc54f621dafc" containerID="3f02c54d78aafd4659cbca50c595be23151c89e558c85e87fa55f32607e94a1e" exitCode=0 Nov 21 17:55:20 crc kubenswrapper[4967]: I1121 17:55:20.077939 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-787hc" event={"ID":"f95310d7-bcc1-46a9-b2f4-dc54f621dafc","Type":"ContainerDied","Data":"3f02c54d78aafd4659cbca50c595be23151c89e558c85e87fa55f32607e94a1e"} Nov 21 17:55:20 crc kubenswrapper[4967]: I1121 17:55:20.077981 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-787hc" event={"ID":"f95310d7-bcc1-46a9-b2f4-dc54f621dafc","Type":"ContainerDied","Data":"ff6f908e80e577889e938e1ce5baa6b4bf7cd89d728f68bd99a5dd2cf33bd904"} Nov 21 17:55:20 crc kubenswrapper[4967]: I1121 17:55:20.077999 4967 scope.go:117] "RemoveContainer" containerID="3f02c54d78aafd4659cbca50c595be23151c89e558c85e87fa55f32607e94a1e" Nov 21 17:55:20 crc kubenswrapper[4967]: I1121 17:55:20.078012 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-787hc" Nov 21 17:55:20 crc kubenswrapper[4967]: I1121 17:55:20.098418 4967 scope.go:117] "RemoveContainer" containerID="d438a57692c1edf218d8147ce8fd80352dfc2f08dc154f209d8e3ad40bdfef9e" Nov 21 17:55:20 crc kubenswrapper[4967]: I1121 17:55:20.119710 4967 scope.go:117] "RemoveContainer" containerID="95e31b15898774b096c4e9653b81346235d49f6e38529c9a586e1896458dddf0" Nov 21 17:55:20 crc kubenswrapper[4967]: I1121 17:55:20.137172 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-787hc"] Nov 21 17:55:20 crc kubenswrapper[4967]: I1121 17:55:20.147809 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-787hc"] Nov 21 17:55:20 crc kubenswrapper[4967]: I1121 17:55:20.194091 4967 scope.go:117] "RemoveContainer" containerID="3f02c54d78aafd4659cbca50c595be23151c89e558c85e87fa55f32607e94a1e" Nov 21 17:55:20 crc kubenswrapper[4967]: E1121 17:55:20.194692 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f02c54d78aafd4659cbca50c595be23151c89e558c85e87fa55f32607e94a1e\": container with ID starting with 3f02c54d78aafd4659cbca50c595be23151c89e558c85e87fa55f32607e94a1e not found: ID does not exist" containerID="3f02c54d78aafd4659cbca50c595be23151c89e558c85e87fa55f32607e94a1e" Nov 21 17:55:20 crc kubenswrapper[4967]: I1121 17:55:20.194751 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f02c54d78aafd4659cbca50c595be23151c89e558c85e87fa55f32607e94a1e"} err="failed to get container status \"3f02c54d78aafd4659cbca50c595be23151c89e558c85e87fa55f32607e94a1e\": rpc error: code = NotFound desc = could not find container \"3f02c54d78aafd4659cbca50c595be23151c89e558c85e87fa55f32607e94a1e\": container with ID starting with 3f02c54d78aafd4659cbca50c595be23151c89e558c85e87fa55f32607e94a1e not found: ID does not exist" Nov 21 17:55:20 crc kubenswrapper[4967]: I1121 17:55:20.194783 4967 scope.go:117] "RemoveContainer" containerID="d438a57692c1edf218d8147ce8fd80352dfc2f08dc154f209d8e3ad40bdfef9e" Nov 21 17:55:20 crc kubenswrapper[4967]: E1121 17:55:20.195296 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d438a57692c1edf218d8147ce8fd80352dfc2f08dc154f209d8e3ad40bdfef9e\": container with ID starting with d438a57692c1edf218d8147ce8fd80352dfc2f08dc154f209d8e3ad40bdfef9e not found: ID does not exist" containerID="d438a57692c1edf218d8147ce8fd80352dfc2f08dc154f209d8e3ad40bdfef9e" Nov 21 17:55:20 crc kubenswrapper[4967]: I1121 17:55:20.195358 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d438a57692c1edf218d8147ce8fd80352dfc2f08dc154f209d8e3ad40bdfef9e"} err="failed to get container status \"d438a57692c1edf218d8147ce8fd80352dfc2f08dc154f209d8e3ad40bdfef9e\": rpc error: code = NotFound desc = could not find container \"d438a57692c1edf218d8147ce8fd80352dfc2f08dc154f209d8e3ad40bdfef9e\": container with ID starting with d438a57692c1edf218d8147ce8fd80352dfc2f08dc154f209d8e3ad40bdfef9e not found: ID does not exist" Nov 21 17:55:20 crc kubenswrapper[4967]: I1121 17:55:20.195390 4967 scope.go:117] "RemoveContainer" containerID="95e31b15898774b096c4e9653b81346235d49f6e38529c9a586e1896458dddf0" Nov 21 17:55:20 crc kubenswrapper[4967]: E1121 17:55:20.195770 4967 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"95e31b15898774b096c4e9653b81346235d49f6e38529c9a586e1896458dddf0\": container with ID starting with 95e31b15898774b096c4e9653b81346235d49f6e38529c9a586e1896458dddf0 not found: ID does not exist" containerID="95e31b15898774b096c4e9653b81346235d49f6e38529c9a586e1896458dddf0" Nov 21 17:55:20 crc kubenswrapper[4967]: I1121 17:55:20.195829 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95e31b15898774b096c4e9653b81346235d49f6e38529c9a586e1896458dddf0"} err="failed to get container status \"95e31b15898774b096c4e9653b81346235d49f6e38529c9a586e1896458dddf0\": rpc error: code = NotFound desc = could not find container \"95e31b15898774b096c4e9653b81346235d49f6e38529c9a586e1896458dddf0\": container with ID starting with 95e31b15898774b096c4e9653b81346235d49f6e38529c9a586e1896458dddf0 not found: ID does not exist" Nov 21 17:55:20 crc kubenswrapper[4967]: I1121 17:55:20.550566 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f95310d7-bcc1-46a9-b2f4-dc54f621dafc" path="/var/lib/kubelet/pods/f95310d7-bcc1-46a9-b2f4-dc54f621dafc/volumes" Nov 21 17:55:35 crc kubenswrapper[4967]: I1121 17:55:35.128834 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_cluster-logging-operator-ff9846bd-xrnc9_4617859e-9a3d-412d-adbe-b229de618303/cluster-logging-operator/0.log" Nov 21 17:55:35 crc kubenswrapper[4967]: I1121 17:55:35.313005 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_collector-jv4x8_480deae0-8dd9-46d9-86fd-19cda7420bf5/collector/0.log" Nov 21 17:55:35 crc kubenswrapper[4967]: I1121 17:55:35.352289 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-compactor-0_e0783f70-8b59-4215-be4a-8ca2c97cc788/loki-compactor/0.log" Nov 21 17:55:35 crc kubenswrapper[4967]: I1121 17:55:35.584005 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-distributor-76cc67bf56-w6drw_942afa8f-650f-4a9e-b47f-2be4134d16b9/loki-distributor/0.log" Nov 21 17:55:35 crc kubenswrapper[4967]: I1121 17:55:35.689194 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-f4f7895cf-4xd89_8833a69e-7f87-4f56-9610-8dd9cb841732/gateway/0.log" Nov 21 17:55:35 crc kubenswrapper[4967]: I1121 17:55:35.733616 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-f4f7895cf-4xd89_8833a69e-7f87-4f56-9610-8dd9cb841732/opa/0.log" Nov 21 17:55:35 crc kubenswrapper[4967]: I1121 17:55:35.876414 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-f4f7895cf-thgv6_1c995e7a-4ea8-459f-83a9-eede922cb3e3/gateway/0.log" Nov 21 17:55:35 crc kubenswrapper[4967]: I1121 17:55:35.939189 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-f4f7895cf-thgv6_1c995e7a-4ea8-459f-83a9-eede922cb3e3/opa/0.log" Nov 21 17:55:36 crc kubenswrapper[4967]: I1121 17:55:36.037875 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-index-gateway-0_ea93b8c8-babe-4417-8741-9ae060295ba0/loki-index-gateway/0.log" Nov 21 17:55:36 crc kubenswrapper[4967]: I1121 17:55:36.296014 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-ingester-0_16e7bcb3-c6a0-440e-a47f-4c3ce1ddd3c4/loki-ingester/0.log" Nov 21 17:55:36 crc 
kubenswrapper[4967]: I1121 17:55:36.317406 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-querier-5895d59bb8-56lsk_d7cbc5b7-37df-4e57-8e69-aa9c9e9cd0d3/loki-querier/0.log" Nov 21 17:55:36 crc kubenswrapper[4967]: I1121 17:55:36.569890 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-query-frontend-84558f7c9f-dsn5g_16d794ce-4b6f-4250-835b-28311b905a2c/loki-query-frontend/0.log" Nov 21 17:55:52 crc kubenswrapper[4967]: I1121 17:55:52.703426 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-x7rr6_0007ae58-10dd-45bc-85cb-2a74a5cca4e5/kube-rbac-proxy/0.log" Nov 21 17:55:52 crc kubenswrapper[4967]: I1121 17:55:52.819058 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-x7rr6_0007ae58-10dd-45bc-85cb-2a74a5cca4e5/controller/0.log" Nov 21 17:55:52 crc kubenswrapper[4967]: I1121 17:55:52.981415 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knjxz_f9285829-036c-4010-b85e-6fcec9f6ce0e/cp-frr-files/0.log" Nov 21 17:55:53 crc kubenswrapper[4967]: I1121 17:55:53.184802 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knjxz_f9285829-036c-4010-b85e-6fcec9f6ce0e/cp-frr-files/0.log" Nov 21 17:55:53 crc kubenswrapper[4967]: I1121 17:55:53.206863 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knjxz_f9285829-036c-4010-b85e-6fcec9f6ce0e/cp-metrics/0.log" Nov 21 17:55:53 crc kubenswrapper[4967]: I1121 17:55:53.206932 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knjxz_f9285829-036c-4010-b85e-6fcec9f6ce0e/cp-reloader/0.log" Nov 21 17:55:53 crc kubenswrapper[4967]: I1121 17:55:53.229096 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knjxz_f9285829-036c-4010-b85e-6fcec9f6ce0e/cp-reloader/0.log" Nov 21 17:55:53 crc kubenswrapper[4967]: I1121 17:55:53.417753 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knjxz_f9285829-036c-4010-b85e-6fcec9f6ce0e/cp-reloader/0.log" Nov 21 17:55:53 crc kubenswrapper[4967]: I1121 17:55:53.420386 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knjxz_f9285829-036c-4010-b85e-6fcec9f6ce0e/cp-frr-files/0.log" Nov 21 17:55:53 crc kubenswrapper[4967]: I1121 17:55:53.455514 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knjxz_f9285829-036c-4010-b85e-6fcec9f6ce0e/cp-metrics/0.log" Nov 21 17:55:53 crc kubenswrapper[4967]: I1121 17:55:53.493952 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knjxz_f9285829-036c-4010-b85e-6fcec9f6ce0e/cp-metrics/0.log" Nov 21 17:55:53 crc kubenswrapper[4967]: I1121 17:55:53.660024 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knjxz_f9285829-036c-4010-b85e-6fcec9f6ce0e/cp-frr-files/0.log" Nov 21 17:55:53 crc kubenswrapper[4967]: I1121 17:55:53.683878 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knjxz_f9285829-036c-4010-b85e-6fcec9f6ce0e/cp-reloader/0.log" Nov 21 17:55:53 crc kubenswrapper[4967]: I1121 17:55:53.711906 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knjxz_f9285829-036c-4010-b85e-6fcec9f6ce0e/cp-metrics/0.log" Nov 21 17:55:53 crc kubenswrapper[4967]: I1121 17:55:53.754643 4967 log.go:25] 
"Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knjxz_f9285829-036c-4010-b85e-6fcec9f6ce0e/controller/0.log" Nov 21 17:55:53 crc kubenswrapper[4967]: I1121 17:55:53.909136 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knjxz_f9285829-036c-4010-b85e-6fcec9f6ce0e/frr-metrics/0.log" Nov 21 17:55:53 crc kubenswrapper[4967]: I1121 17:55:53.947751 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knjxz_f9285829-036c-4010-b85e-6fcec9f6ce0e/kube-rbac-proxy/0.log" Nov 21 17:55:54 crc kubenswrapper[4967]: I1121 17:55:54.027643 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knjxz_f9285829-036c-4010-b85e-6fcec9f6ce0e/kube-rbac-proxy-frr/0.log" Nov 21 17:55:54 crc kubenswrapper[4967]: I1121 17:55:54.199894 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knjxz_f9285829-036c-4010-b85e-6fcec9f6ce0e/reloader/0.log" Nov 21 17:55:54 crc kubenswrapper[4967]: I1121 17:55:54.258151 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-wzh6s_1fa4e173-1be2-4f7d-82e8-d607e1481bcd/frr-k8s-webhook-server/0.log" Nov 21 17:55:54 crc kubenswrapper[4967]: I1121 17:55:54.513603 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-77bbdf4fb9-hj2sz_33fe3ce1-2592-438b-a9a0-8c55a47013d2/manager/0.log" Nov 21 17:55:54 crc kubenswrapper[4967]: I1121 17:55:54.752140 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-7d67c6f6df-vjpg7_c3800b73-ab16-46a6-b24d-e96158d1deec/webhook-server/0.log" Nov 21 17:55:54 crc kubenswrapper[4967]: I1121 17:55:54.788772 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-cz2xh_52ab67e9-1c78-497d-aa00-18a29052c0bd/kube-rbac-proxy/0.log" Nov 21 17:55:55 crc kubenswrapper[4967]: I1121 17:55:55.757564 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-cz2xh_52ab67e9-1c78-497d-aa00-18a29052c0bd/speaker/0.log" Nov 21 17:55:56 crc kubenswrapper[4967]: I1121 17:55:56.481694 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-knjxz_f9285829-036c-4010-b85e-6fcec9f6ce0e/frr/0.log" Nov 21 17:56:11 crc kubenswrapper[4967]: I1121 17:56:11.784349 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk_04f688a6-4d0b-49f8-99db-98ecfc140fb9/util/0.log" Nov 21 17:56:11 crc kubenswrapper[4967]: I1121 17:56:11.974532 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk_04f688a6-4d0b-49f8-99db-98ecfc140fb9/util/0.log" Nov 21 17:56:11 crc kubenswrapper[4967]: I1121 17:56:11.974789 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk_04f688a6-4d0b-49f8-99db-98ecfc140fb9/pull/0.log" Nov 21 17:56:12 crc kubenswrapper[4967]: I1121 17:56:12.040232 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk_04f688a6-4d0b-49f8-99db-98ecfc140fb9/pull/0.log" Nov 21 17:56:12 crc kubenswrapper[4967]: I1121 17:56:12.291038 4967 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk_04f688a6-4d0b-49f8-99db-98ecfc140fb9/extract/0.log" Nov 21 17:56:12 crc kubenswrapper[4967]: I1121 17:56:12.412006 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk_04f688a6-4d0b-49f8-99db-98ecfc140fb9/util/0.log" Nov 21 17:56:12 crc kubenswrapper[4967]: I1121 17:56:12.432598 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8bjkqk_04f688a6-4d0b-49f8-99db-98ecfc140fb9/pull/0.log" Nov 21 17:56:12 crc kubenswrapper[4967]: I1121 17:56:12.557601 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8_646e86eb-f5d8-43c0-9d54-8fdb55418f0d/util/0.log" Nov 21 17:56:12 crc kubenswrapper[4967]: I1121 17:56:12.749401 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8_646e86eb-f5d8-43c0-9d54-8fdb55418f0d/pull/0.log" Nov 21 17:56:12 crc kubenswrapper[4967]: I1121 17:56:12.800365 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8_646e86eb-f5d8-43c0-9d54-8fdb55418f0d/pull/0.log" Nov 21 17:56:12 crc kubenswrapper[4967]: I1121 17:56:12.916023 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8_646e86eb-f5d8-43c0-9d54-8fdb55418f0d/util/0.log" Nov 21 17:56:12 crc kubenswrapper[4967]: I1121 17:56:12.961325 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8_646e86eb-f5d8-43c0-9d54-8fdb55418f0d/util/0.log" Nov 21 17:56:13 crc kubenswrapper[4967]: I1121 17:56:13.023228 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8_646e86eb-f5d8-43c0-9d54-8fdb55418f0d/pull/0.log" Nov 21 17:56:13 crc kubenswrapper[4967]: I1121 17:56:13.059463 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772evwmt8_646e86eb-f5d8-43c0-9d54-8fdb55418f0d/extract/0.log" Nov 21 17:56:13 crc kubenswrapper[4967]: I1121 17:56:13.205381 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg_3989e919-932b-4153-98cf-5f1ebcc40f89/util/0.log" Nov 21 17:56:13 crc kubenswrapper[4967]: I1121 17:56:13.425064 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg_3989e919-932b-4153-98cf-5f1ebcc40f89/pull/0.log" Nov 21 17:56:13 crc kubenswrapper[4967]: I1121 17:56:13.512910 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg_3989e919-932b-4153-98cf-5f1ebcc40f89/pull/0.log" Nov 21 17:56:13 crc kubenswrapper[4967]: I1121 17:56:13.661639 4967 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg_3989e919-932b-4153-98cf-5f1ebcc40f89/util/0.log" Nov 21 17:56:13 crc kubenswrapper[4967]: I1121 17:56:13.673487 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg_3989e919-932b-4153-98cf-5f1ebcc40f89/util/0.log" Nov 21 17:56:13 crc kubenswrapper[4967]: I1121 17:56:13.745541 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg_3989e919-932b-4153-98cf-5f1ebcc40f89/extract/0.log" Nov 21 17:56:13 crc kubenswrapper[4967]: I1121 17:56:13.776374 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210wq9qg_3989e919-932b-4153-98cf-5f1ebcc40f89/pull/0.log" Nov 21 17:56:13 crc kubenswrapper[4967]: I1121 17:56:13.988599 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf_0d08e0c8-b3f9-4742-9388-686edea297eb/util/0.log" Nov 21 17:56:14 crc kubenswrapper[4967]: I1121 17:56:14.212701 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf_0d08e0c8-b3f9-4742-9388-686edea297eb/pull/0.log" Nov 21 17:56:14 crc kubenswrapper[4967]: I1121 17:56:14.223527 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf_0d08e0c8-b3f9-4742-9388-686edea297eb/util/0.log" Nov 21 17:56:14 crc kubenswrapper[4967]: I1121 17:56:14.563896 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf_0d08e0c8-b3f9-4742-9388-686edea297eb/pull/0.log" Nov 21 17:56:14 crc kubenswrapper[4967]: I1121 17:56:14.897553 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf_0d08e0c8-b3f9-4742-9388-686edea297eb/pull/0.log" Nov 21 17:56:14 crc kubenswrapper[4967]: I1121 17:56:14.952656 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf_0d08e0c8-b3f9-4742-9388-686edea297eb/extract/0.log" Nov 21 17:56:14 crc kubenswrapper[4967]: I1121 17:56:14.957762 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463fghncf_0d08e0c8-b3f9-4742-9388-686edea297eb/util/0.log" Nov 21 17:56:15 crc kubenswrapper[4967]: I1121 17:56:15.148230 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-q2vpn_e04b35e4-8722-44c7-8c0a-356e143f637e/extract-utilities/0.log" Nov 21 17:56:15 crc kubenswrapper[4967]: I1121 17:56:15.366918 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-q2vpn_e04b35e4-8722-44c7-8c0a-356e143f637e/extract-utilities/0.log" Nov 21 17:56:15 crc kubenswrapper[4967]: I1121 17:56:15.367420 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-q2vpn_e04b35e4-8722-44c7-8c0a-356e143f637e/extract-content/0.log" Nov 21 17:56:15 crc kubenswrapper[4967]: I1121 17:56:15.423923 
4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-q2vpn_e04b35e4-8722-44c7-8c0a-356e143f637e/extract-content/0.log" Nov 21 17:56:15 crc kubenswrapper[4967]: I1121 17:56:15.621380 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-q2vpn_e04b35e4-8722-44c7-8c0a-356e143f637e/extract-utilities/0.log" Nov 21 17:56:15 crc kubenswrapper[4967]: I1121 17:56:15.664503 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-q2vpn_e04b35e4-8722-44c7-8c0a-356e143f637e/extract-content/0.log" Nov 21 17:56:15 crc kubenswrapper[4967]: I1121 17:56:15.872451 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2dhwv_58b7bcbc-c1ff-48a4-8d78-eded8239d6a4/extract-utilities/0.log" Nov 21 17:56:16 crc kubenswrapper[4967]: I1121 17:56:16.048571 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2dhwv_58b7bcbc-c1ff-48a4-8d78-eded8239d6a4/extract-utilities/0.log" Nov 21 17:56:16 crc kubenswrapper[4967]: I1121 17:56:16.121414 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2dhwv_58b7bcbc-c1ff-48a4-8d78-eded8239d6a4/extract-content/0.log" Nov 21 17:56:16 crc kubenswrapper[4967]: I1121 17:56:16.122792 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2dhwv_58b7bcbc-c1ff-48a4-8d78-eded8239d6a4/extract-content/0.log" Nov 21 17:56:16 crc kubenswrapper[4967]: I1121 17:56:16.360534 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2dhwv_58b7bcbc-c1ff-48a4-8d78-eded8239d6a4/extract-utilities/0.log" Nov 21 17:56:16 crc kubenswrapper[4967]: I1121 17:56:16.406601 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2dhwv_58b7bcbc-c1ff-48a4-8d78-eded8239d6a4/extract-content/0.log" Nov 21 17:56:16 crc kubenswrapper[4967]: I1121 17:56:16.629722 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7_b71c70ea-eebb-40fb-baef-5e993f014e89/util/0.log" Nov 21 17:56:16 crc kubenswrapper[4967]: I1121 17:56:16.862334 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7_b71c70ea-eebb-40fb-baef-5e993f014e89/pull/0.log" Nov 21 17:56:16 crc kubenswrapper[4967]: I1121 17:56:16.885380 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7_b71c70ea-eebb-40fb-baef-5e993f014e89/pull/0.log" Nov 21 17:56:16 crc kubenswrapper[4967]: I1121 17:56:16.935996 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7_b71c70ea-eebb-40fb-baef-5e993f014e89/util/0.log" Nov 21 17:56:17 crc kubenswrapper[4967]: I1121 17:56:17.267727 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7_b71c70ea-eebb-40fb-baef-5e993f014e89/extract/0.log" Nov 21 17:56:17 crc kubenswrapper[4967]: I1121 17:56:17.275780 4967 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7_b71c70ea-eebb-40fb-baef-5e993f014e89/pull/0.log" Nov 21 17:56:17 crc kubenswrapper[4967]: I1121 17:56:17.332069 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-q2vpn_e04b35e4-8722-44c7-8c0a-356e143f637e/registry-server/0.log" Nov 21 17:56:17 crc kubenswrapper[4967]: I1121 17:56:17.371731 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c62rfx7_b71c70ea-eebb-40fb-baef-5e993f014e89/util/0.log" Nov 21 17:56:17 crc kubenswrapper[4967]: I1121 17:56:17.554572 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-xntrp_34e31926-b4b0-4c27-b2e9-8825d80a21f9/marketplace-operator/0.log" Nov 21 17:56:17 crc kubenswrapper[4967]: I1121 17:56:17.750782 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-29zhf_a8e33eb6-c76a-4d8b-896e-75ab69247a2a/extract-utilities/0.log" Nov 21 17:56:17 crc kubenswrapper[4967]: I1121 17:56:17.947997 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-29zhf_a8e33eb6-c76a-4d8b-896e-75ab69247a2a/extract-content/0.log" Nov 21 17:56:17 crc kubenswrapper[4967]: I1121 17:56:17.990515 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-29zhf_a8e33eb6-c76a-4d8b-896e-75ab69247a2a/extract-utilities/0.log" Nov 21 17:56:18 crc kubenswrapper[4967]: I1121 17:56:18.046439 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-29zhf_a8e33eb6-c76a-4d8b-896e-75ab69247a2a/extract-content/0.log" Nov 21 17:56:18 crc kubenswrapper[4967]: I1121 17:56:18.166660 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2dhwv_58b7bcbc-c1ff-48a4-8d78-eded8239d6a4/registry-server/0.log" Nov 21 17:56:18 crc kubenswrapper[4967]: I1121 17:56:18.261057 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-29zhf_a8e33eb6-c76a-4d8b-896e-75ab69247a2a/extract-content/0.log" Nov 21 17:56:18 crc kubenswrapper[4967]: I1121 17:56:18.418637 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-29zhf_a8e33eb6-c76a-4d8b-896e-75ab69247a2a/extract-utilities/0.log" Nov 21 17:56:18 crc kubenswrapper[4967]: I1121 17:56:18.437200 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tnkcg_0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25/extract-utilities/0.log" Nov 21 17:56:18 crc kubenswrapper[4967]: I1121 17:56:18.568081 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-29zhf_a8e33eb6-c76a-4d8b-896e-75ab69247a2a/registry-server/0.log" Nov 21 17:56:18 crc kubenswrapper[4967]: I1121 17:56:18.745560 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tnkcg_0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25/extract-utilities/0.log" Nov 21 17:56:18 crc kubenswrapper[4967]: I1121 17:56:18.748250 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tnkcg_0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25/extract-content/0.log" Nov 21 17:56:18 crc kubenswrapper[4967]: I1121 17:56:18.773780 4967 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tnkcg_0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25/extract-content/0.log" Nov 21 17:56:18 crc kubenswrapper[4967]: I1121 17:56:18.961702 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tnkcg_0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25/extract-content/0.log" Nov 21 17:56:18 crc kubenswrapper[4967]: I1121 17:56:18.969486 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tnkcg_0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25/extract-utilities/0.log" Nov 21 17:56:20 crc kubenswrapper[4967]: I1121 17:56:20.250424 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-tnkcg_0e20dcd4-a457-43b1-a0a0-5cb1ee78cf25/registry-server/0.log" Nov 21 17:56:34 crc kubenswrapper[4967]: I1121 17:56:34.113818 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-wqt49_f6e9b133-82e0-4185-9fa4-7007ffe75f5d/prometheus-operator/0.log" Nov 21 17:56:34 crc kubenswrapper[4967]: I1121 17:56:34.355212 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-974477bf4-9z6rp_aec55e5b-3112-46e8-bc8b-c643e8fca0fe/prometheus-operator-admission-webhook/0.log" Nov 21 17:56:34 crc kubenswrapper[4967]: I1121 17:56:34.398795 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-974477bf4-xtlzj_bf8c40bc-4733-49aa-b2e2-9297e0b7bd30/prometheus-operator-admission-webhook/0.log" Nov 21 17:56:34 crc kubenswrapper[4967]: I1121 17:56:34.585569 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-pr4jn_0070e88a-ae9a-4436-ab1f-4e8e4e2ba557/operator/0.log" Nov 21 17:56:34 crc kubenswrapper[4967]: I1121 17:56:34.641749 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-7d5fb4cbfb-b7zgw_55a13dc8-8cec-4642-9c0b-3c6799d942fc/observability-ui-dashboards/0.log" Nov 21 17:56:34 crc kubenswrapper[4967]: I1121 17:56:34.776707 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-6tvkc_ec826dcc-83f7-4138-b93c-25603f94599a/perses-operator/0.log" Nov 21 17:56:46 crc kubenswrapper[4967]: I1121 17:56:46.522212 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:56:46 crc kubenswrapper[4967]: I1121 17:56:46.523557 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:56:50 crc kubenswrapper[4967]: I1121 17:56:50.021802 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-77f9f48c4d-942m7_cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7/kube-rbac-proxy/0.log" Nov 21 17:56:50 crc kubenswrapper[4967]: I1121 17:56:50.041985 4967 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-77f9f48c4d-942m7_cbd913ea-db25-4e7a-a2ec-c1a39a83ebe7/manager/0.log" Nov 21 17:57:09 crc kubenswrapper[4967]: E1121 17:57:09.615318 4967 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.148:34702->38.102.83.148:38425: write tcp 38.102.83.148:34702->38.102.83.148:38425: write: broken pipe Nov 21 17:57:16 crc kubenswrapper[4967]: I1121 17:57:16.522066 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:57:16 crc kubenswrapper[4967]: I1121 17:57:16.522975 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:57:46 crc kubenswrapper[4967]: I1121 17:57:46.524407 4967 patch_prober.go:28] interesting pod/machine-config-daemon-lrth2 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 17:57:46 crc kubenswrapper[4967]: I1121 17:57:46.524905 4967 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 17:57:46 crc kubenswrapper[4967]: I1121 17:57:46.524969 4967 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" Nov 21 17:57:46 crc kubenswrapper[4967]: I1121 17:57:46.526249 4967 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1"} pod="openshift-machine-config-operator/machine-config-daemon-lrth2" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 17:57:46 crc kubenswrapper[4967]: I1121 17:57:46.526335 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerName="machine-config-daemon" containerID="cri-o://21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" gracePeriod=600 Nov 21 17:57:46 crc kubenswrapper[4967]: E1121 17:57:46.660020 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:57:47 crc kubenswrapper[4967]: I1121 17:57:47.315363 4967 scope.go:117] "RemoveContainer" 
containerID="8e4273ce9f73ff5b303c4a026031340afe26b1db282415664ba9d1f91d6d194c" Nov 21 17:57:47 crc kubenswrapper[4967]: I1121 17:57:47.665902 4967 generic.go:334] "Generic (PLEG): container finished" podID="8f12a156-8db0-49be-a048-e7c4988f9cd0" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" exitCode=0 Nov 21 17:57:47 crc kubenswrapper[4967]: I1121 17:57:47.665965 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerDied","Data":"21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1"} Nov 21 17:57:47 crc kubenswrapper[4967]: I1121 17:57:47.666013 4967 scope.go:117] "RemoveContainer" containerID="7f557661ba6f6037ba393d7f4791869dbf4276d289589af753d8dc1537d3b2c4" Nov 21 17:57:47 crc kubenswrapper[4967]: I1121 17:57:47.667251 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 17:57:47 crc kubenswrapper[4967]: E1121 17:57:47.668084 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:58:00 crc kubenswrapper[4967]: I1121 17:58:00.536729 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 17:58:00 crc kubenswrapper[4967]: E1121 17:58:00.538304 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:58:11 crc kubenswrapper[4967]: I1121 17:58:11.547917 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 17:58:11 crc kubenswrapper[4967]: E1121 17:58:11.548721 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:58:25 crc kubenswrapper[4967]: I1121 17:58:25.536426 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 17:58:25 crc kubenswrapper[4967]: E1121 17:58:25.537880 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:58:37 
crc kubenswrapper[4967]: I1121 17:58:37.537453 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 17:58:37 crc kubenswrapper[4967]: E1121 17:58:37.538555 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:58:47 crc kubenswrapper[4967]: I1121 17:58:47.413286 4967 scope.go:117] "RemoveContainer" containerID="99da2aecc36ddce379682f5890abf61131c8fc0b45c634e936c7986219d7c1cc" Nov 21 17:58:52 crc kubenswrapper[4967]: I1121 17:58:52.549725 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 17:58:52 crc kubenswrapper[4967]: E1121 17:58:52.551683 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:58:59 crc kubenswrapper[4967]: I1121 17:58:59.756337 4967 generic.go:334] "Generic (PLEG): container finished" podID="d9f8b255-4914-4511-8534-2814bc1c1181" containerID="babbe72c705a64d8422c3d9fbd0246f24c7a47e407478bad9667048c7a98ff20" exitCode=0 Nov 21 17:58:59 crc kubenswrapper[4967]: I1121 17:58:59.756458 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-97nf5/must-gather-lmdw5" event={"ID":"d9f8b255-4914-4511-8534-2814bc1c1181","Type":"ContainerDied","Data":"babbe72c705a64d8422c3d9fbd0246f24c7a47e407478bad9667048c7a98ff20"} Nov 21 17:58:59 crc kubenswrapper[4967]: I1121 17:58:59.757901 4967 scope.go:117] "RemoveContainer" containerID="babbe72c705a64d8422c3d9fbd0246f24c7a47e407478bad9667048c7a98ff20" Nov 21 17:58:59 crc kubenswrapper[4967]: I1121 17:58:59.904914 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-97nf5_must-gather-lmdw5_d9f8b255-4914-4511-8534-2814bc1c1181/gather/0.log" Nov 21 17:59:05 crc kubenswrapper[4967]: I1121 17:59:05.536762 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 17:59:05 crc kubenswrapper[4967]: E1121 17:59:05.537800 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:59:08 crc kubenswrapper[4967]: I1121 17:59:08.681131 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-97nf5/must-gather-lmdw5"] Nov 21 17:59:08 crc kubenswrapper[4967]: I1121 17:59:08.681780 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-97nf5/must-gather-lmdw5" 
podUID="d9f8b255-4914-4511-8534-2814bc1c1181" containerName="copy" containerID="cri-o://bafa52768d982ffd5caa061e21e16e7132e166abe3f9175fe429f845465e68a8" gracePeriod=2 Nov 21 17:59:08 crc kubenswrapper[4967]: I1121 17:59:08.695049 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-97nf5/must-gather-lmdw5"] Nov 21 17:59:08 crc kubenswrapper[4967]: I1121 17:59:08.926998 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-97nf5_must-gather-lmdw5_d9f8b255-4914-4511-8534-2814bc1c1181/copy/0.log" Nov 21 17:59:08 crc kubenswrapper[4967]: I1121 17:59:08.927720 4967 generic.go:334] "Generic (PLEG): container finished" podID="d9f8b255-4914-4511-8534-2814bc1c1181" containerID="bafa52768d982ffd5caa061e21e16e7132e166abe3f9175fe429f845465e68a8" exitCode=143 Nov 21 17:59:09 crc kubenswrapper[4967]: I1121 17:59:09.400796 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-97nf5_must-gather-lmdw5_d9f8b255-4914-4511-8534-2814bc1c1181/copy/0.log" Nov 21 17:59:09 crc kubenswrapper[4967]: I1121 17:59:09.427014 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-97nf5/must-gather-lmdw5" Nov 21 17:59:09 crc kubenswrapper[4967]: I1121 17:59:09.563587 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/d9f8b255-4914-4511-8534-2814bc1c1181-must-gather-output\") pod \"d9f8b255-4914-4511-8534-2814bc1c1181\" (UID: \"d9f8b255-4914-4511-8534-2814bc1c1181\") " Nov 21 17:59:09 crc kubenswrapper[4967]: I1121 17:59:09.564415 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lv8g2\" (UniqueName: \"kubernetes.io/projected/d9f8b255-4914-4511-8534-2814bc1c1181-kube-api-access-lv8g2\") pod \"d9f8b255-4914-4511-8534-2814bc1c1181\" (UID: \"d9f8b255-4914-4511-8534-2814bc1c1181\") " Nov 21 17:59:09 crc kubenswrapper[4967]: I1121 17:59:09.572170 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9f8b255-4914-4511-8534-2814bc1c1181-kube-api-access-lv8g2" (OuterVolumeSpecName: "kube-api-access-lv8g2") pod "d9f8b255-4914-4511-8534-2814bc1c1181" (UID: "d9f8b255-4914-4511-8534-2814bc1c1181"). InnerVolumeSpecName "kube-api-access-lv8g2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 17:59:09 crc kubenswrapper[4967]: I1121 17:59:09.668869 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lv8g2\" (UniqueName: \"kubernetes.io/projected/d9f8b255-4914-4511-8534-2814bc1c1181-kube-api-access-lv8g2\") on node \"crc\" DevicePath \"\"" Nov 21 17:59:09 crc kubenswrapper[4967]: I1121 17:59:09.777002 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d9f8b255-4914-4511-8534-2814bc1c1181-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "d9f8b255-4914-4511-8534-2814bc1c1181" (UID: "d9f8b255-4914-4511-8534-2814bc1c1181"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 17:59:09 crc kubenswrapper[4967]: I1121 17:59:09.904624 4967 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/d9f8b255-4914-4511-8534-2814bc1c1181-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 21 17:59:09 crc kubenswrapper[4967]: I1121 17:59:09.952134 4967 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-97nf5_must-gather-lmdw5_d9f8b255-4914-4511-8534-2814bc1c1181/copy/0.log" Nov 21 17:59:09 crc kubenswrapper[4967]: I1121 17:59:09.956041 4967 scope.go:117] "RemoveContainer" containerID="bafa52768d982ffd5caa061e21e16e7132e166abe3f9175fe429f845465e68a8" Nov 21 17:59:09 crc kubenswrapper[4967]: I1121 17:59:09.956115 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-97nf5/must-gather-lmdw5" Nov 21 17:59:10 crc kubenswrapper[4967]: I1121 17:59:10.017510 4967 scope.go:117] "RemoveContainer" containerID="babbe72c705a64d8422c3d9fbd0246f24c7a47e407478bad9667048c7a98ff20" Nov 21 17:59:10 crc kubenswrapper[4967]: I1121 17:59:10.556744 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9f8b255-4914-4511-8534-2814bc1c1181" path="/var/lib/kubelet/pods/d9f8b255-4914-4511-8534-2814bc1c1181/volumes" Nov 21 17:59:20 crc kubenswrapper[4967]: I1121 17:59:20.536909 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 17:59:20 crc kubenswrapper[4967]: E1121 17:59:20.538299 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:59:33 crc kubenswrapper[4967]: I1121 17:59:33.536868 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 17:59:33 crc kubenswrapper[4967]: E1121 17:59:33.537904 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:59:46 crc kubenswrapper[4967]: I1121 17:59:46.536816 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 17:59:46 crc kubenswrapper[4967]: E1121 17:59:46.539643 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 17:59:57 crc kubenswrapper[4967]: I1121 17:59:57.536693 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" 
Nov 21 17:59:57 crc kubenswrapper[4967]: E1121 17:59:57.537640 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.163763 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc"] Nov 21 18:00:00 crc kubenswrapper[4967]: E1121 18:00:00.165060 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f95310d7-bcc1-46a9-b2f4-dc54f621dafc" containerName="registry-server" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.165076 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="f95310d7-bcc1-46a9-b2f4-dc54f621dafc" containerName="registry-server" Nov 21 18:00:00 crc kubenswrapper[4967]: E1121 18:00:00.165121 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f95310d7-bcc1-46a9-b2f4-dc54f621dafc" containerName="extract-content" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.165127 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="f95310d7-bcc1-46a9-b2f4-dc54f621dafc" containerName="extract-content" Nov 21 18:00:00 crc kubenswrapper[4967]: E1121 18:00:00.165148 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9f8b255-4914-4511-8534-2814bc1c1181" containerName="copy" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.165154 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9f8b255-4914-4511-8534-2814bc1c1181" containerName="copy" Nov 21 18:00:00 crc kubenswrapper[4967]: E1121 18:00:00.165169 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f95310d7-bcc1-46a9-b2f4-dc54f621dafc" containerName="extract-utilities" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.165176 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="f95310d7-bcc1-46a9-b2f4-dc54f621dafc" containerName="extract-utilities" Nov 21 18:00:00 crc kubenswrapper[4967]: E1121 18:00:00.165184 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9f8b255-4914-4511-8534-2814bc1c1181" containerName="gather" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.165190 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9f8b255-4914-4511-8534-2814bc1c1181" containerName="gather" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.165550 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9f8b255-4914-4511-8534-2814bc1c1181" containerName="copy" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.165572 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="f95310d7-bcc1-46a9-b2f4-dc54f621dafc" containerName="registry-server" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.165601 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9f8b255-4914-4511-8534-2814bc1c1181" containerName="gather" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.166763 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.175104 4967 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.176663 4967 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.176767 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc"] Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.308593 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8vjww\" (UniqueName: \"kubernetes.io/projected/800076f4-1a46-4ab2-914f-2e6903ddafae-kube-api-access-8vjww\") pod \"collect-profiles-29395800-722cc\" (UID: \"800076f4-1a46-4ab2-914f-2e6903ddafae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.308780 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/800076f4-1a46-4ab2-914f-2e6903ddafae-config-volume\") pod \"collect-profiles-29395800-722cc\" (UID: \"800076f4-1a46-4ab2-914f-2e6903ddafae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.308895 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/800076f4-1a46-4ab2-914f-2e6903ddafae-secret-volume\") pod \"collect-profiles-29395800-722cc\" (UID: \"800076f4-1a46-4ab2-914f-2e6903ddafae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.411636 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/800076f4-1a46-4ab2-914f-2e6903ddafae-config-volume\") pod \"collect-profiles-29395800-722cc\" (UID: \"800076f4-1a46-4ab2-914f-2e6903ddafae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.412577 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/800076f4-1a46-4ab2-914f-2e6903ddafae-config-volume\") pod \"collect-profiles-29395800-722cc\" (UID: \"800076f4-1a46-4ab2-914f-2e6903ddafae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.412806 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/800076f4-1a46-4ab2-914f-2e6903ddafae-secret-volume\") pod \"collect-profiles-29395800-722cc\" (UID: \"800076f4-1a46-4ab2-914f-2e6903ddafae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.413857 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8vjww\" (UniqueName: \"kubernetes.io/projected/800076f4-1a46-4ab2-914f-2e6903ddafae-kube-api-access-8vjww\") pod 
\"collect-profiles-29395800-722cc\" (UID: \"800076f4-1a46-4ab2-914f-2e6903ddafae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.430240 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/800076f4-1a46-4ab2-914f-2e6903ddafae-secret-volume\") pod \"collect-profiles-29395800-722cc\" (UID: \"800076f4-1a46-4ab2-914f-2e6903ddafae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.434259 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8vjww\" (UniqueName: \"kubernetes.io/projected/800076f4-1a46-4ab2-914f-2e6903ddafae-kube-api-access-8vjww\") pod \"collect-profiles-29395800-722cc\" (UID: \"800076f4-1a46-4ab2-914f-2e6903ddafae\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc" Nov 21 18:00:00 crc kubenswrapper[4967]: I1121 18:00:00.500544 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc" Nov 21 18:00:01 crc kubenswrapper[4967]: I1121 18:00:01.089294 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc"] Nov 21 18:00:01 crc kubenswrapper[4967]: I1121 18:00:01.635108 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc" event={"ID":"800076f4-1a46-4ab2-914f-2e6903ddafae","Type":"ContainerStarted","Data":"378cbfb1b0eeb2eb759e280d41fcefd2933ae11de3a0d0df74d99e176b386fa4"} Nov 21 18:00:01 crc kubenswrapper[4967]: I1121 18:00:01.635623 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc" event={"ID":"800076f4-1a46-4ab2-914f-2e6903ddafae","Type":"ContainerStarted","Data":"a433da818989410c76be957fc73f9e3389203d767a1147ca922cf4f970929203"} Nov 21 18:00:01 crc kubenswrapper[4967]: I1121 18:00:01.754821 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc" podStartSLOduration=1.754798524 podStartE2EDuration="1.754798524s" podCreationTimestamp="2025-11-21 18:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 18:00:01.753707503 +0000 UTC m=+8690.012228521" watchObservedRunningTime="2025-11-21 18:00:01.754798524 +0000 UTC m=+8690.013319542" Nov 21 18:00:02 crc kubenswrapper[4967]: I1121 18:00:02.646177 4967 generic.go:334] "Generic (PLEG): container finished" podID="800076f4-1a46-4ab2-914f-2e6903ddafae" containerID="378cbfb1b0eeb2eb759e280d41fcefd2933ae11de3a0d0df74d99e176b386fa4" exitCode=0 Nov 21 18:00:02 crc kubenswrapper[4967]: I1121 18:00:02.646435 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc" event={"ID":"800076f4-1a46-4ab2-914f-2e6903ddafae","Type":"ContainerDied","Data":"378cbfb1b0eeb2eb759e280d41fcefd2933ae11de3a0d0df74d99e176b386fa4"} Nov 21 18:00:04 crc kubenswrapper[4967]: I1121 18:00:04.078137 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc" Nov 21 18:00:04 crc kubenswrapper[4967]: I1121 18:00:04.223982 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/800076f4-1a46-4ab2-914f-2e6903ddafae-config-volume\") pod \"800076f4-1a46-4ab2-914f-2e6903ddafae\" (UID: \"800076f4-1a46-4ab2-914f-2e6903ddafae\") " Nov 21 18:00:04 crc kubenswrapper[4967]: I1121 18:00:04.224369 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/800076f4-1a46-4ab2-914f-2e6903ddafae-secret-volume\") pod \"800076f4-1a46-4ab2-914f-2e6903ddafae\" (UID: \"800076f4-1a46-4ab2-914f-2e6903ddafae\") " Nov 21 18:00:04 crc kubenswrapper[4967]: I1121 18:00:04.224425 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8vjww\" (UniqueName: \"kubernetes.io/projected/800076f4-1a46-4ab2-914f-2e6903ddafae-kube-api-access-8vjww\") pod \"800076f4-1a46-4ab2-914f-2e6903ddafae\" (UID: \"800076f4-1a46-4ab2-914f-2e6903ddafae\") " Nov 21 18:00:04 crc kubenswrapper[4967]: I1121 18:00:04.224870 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/800076f4-1a46-4ab2-914f-2e6903ddafae-config-volume" (OuterVolumeSpecName: "config-volume") pod "800076f4-1a46-4ab2-914f-2e6903ddafae" (UID: "800076f4-1a46-4ab2-914f-2e6903ddafae"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 18:00:04 crc kubenswrapper[4967]: I1121 18:00:04.225281 4967 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/800076f4-1a46-4ab2-914f-2e6903ddafae-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 18:00:04 crc kubenswrapper[4967]: I1121 18:00:04.231431 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/800076f4-1a46-4ab2-914f-2e6903ddafae-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "800076f4-1a46-4ab2-914f-2e6903ddafae" (UID: "800076f4-1a46-4ab2-914f-2e6903ddafae"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 18:00:04 crc kubenswrapper[4967]: I1121 18:00:04.231710 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/800076f4-1a46-4ab2-914f-2e6903ddafae-kube-api-access-8vjww" (OuterVolumeSpecName: "kube-api-access-8vjww") pod "800076f4-1a46-4ab2-914f-2e6903ddafae" (UID: "800076f4-1a46-4ab2-914f-2e6903ddafae"). InnerVolumeSpecName "kube-api-access-8vjww". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 18:00:04 crc kubenswrapper[4967]: I1121 18:00:04.328271 4967 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/800076f4-1a46-4ab2-914f-2e6903ddafae-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 18:00:04 crc kubenswrapper[4967]: I1121 18:00:04.328448 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8vjww\" (UniqueName: \"kubernetes.io/projected/800076f4-1a46-4ab2-914f-2e6903ddafae-kube-api-access-8vjww\") on node \"crc\" DevicePath \"\"" Nov 21 18:00:04 crc kubenswrapper[4967]: I1121 18:00:04.676894 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc" event={"ID":"800076f4-1a46-4ab2-914f-2e6903ddafae","Type":"ContainerDied","Data":"a433da818989410c76be957fc73f9e3389203d767a1147ca922cf4f970929203"} Nov 21 18:00:04 crc kubenswrapper[4967]: I1121 18:00:04.676940 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a433da818989410c76be957fc73f9e3389203d767a1147ca922cf4f970929203" Nov 21 18:00:04 crc kubenswrapper[4967]: I1121 18:00:04.677236 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395800-722cc" Nov 21 18:00:05 crc kubenswrapper[4967]: I1121 18:00:05.164110 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk"] Nov 21 18:00:05 crc kubenswrapper[4967]: I1121 18:00:05.204605 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395755-jdjjk"] Nov 21 18:00:06 crc kubenswrapper[4967]: I1121 18:00:06.557009 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12a3823a-2a4d-41f9-b327-2d0f87951cb5" path="/var/lib/kubelet/pods/12a3823a-2a4d-41f9-b327-2d0f87951cb5/volumes" Nov 21 18:00:08 crc kubenswrapper[4967]: I1121 18:00:08.542791 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 18:00:08 crc kubenswrapper[4967]: E1121 18:00:08.543868 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 18:00:22 crc kubenswrapper[4967]: I1121 18:00:22.547894 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 18:00:22 crc kubenswrapper[4967]: E1121 18:00:22.548887 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 18:00:36 crc kubenswrapper[4967]: I1121 18:00:36.536262 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 
18:00:36 crc kubenswrapper[4967]: E1121 18:00:36.537362 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 18:00:47 crc kubenswrapper[4967]: I1121 18:00:47.653921 4967 scope.go:117] "RemoveContainer" containerID="c33f25dbb70fa93692d0caedbe6c42c9e124bf3a475aeebdbe52fe18b098ce9e" Nov 21 18:00:49 crc kubenswrapper[4967]: I1121 18:00:49.536653 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 18:00:49 crc kubenswrapper[4967]: E1121 18:00:49.537658 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 18:01:00 crc kubenswrapper[4967]: I1121 18:01:00.187065 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29395801-5v4g6"] Nov 21 18:01:00 crc kubenswrapper[4967]: E1121 18:01:00.188753 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="800076f4-1a46-4ab2-914f-2e6903ddafae" containerName="collect-profiles" Nov 21 18:01:00 crc kubenswrapper[4967]: I1121 18:01:00.188776 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="800076f4-1a46-4ab2-914f-2e6903ddafae" containerName="collect-profiles" Nov 21 18:01:00 crc kubenswrapper[4967]: I1121 18:01:00.189133 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="800076f4-1a46-4ab2-914f-2e6903ddafae" containerName="collect-profiles" Nov 21 18:01:00 crc kubenswrapper[4967]: I1121 18:01:00.190819 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29395801-5v4g6" Nov 21 18:01:00 crc kubenswrapper[4967]: I1121 18:01:00.214532 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29395801-5v4g6"] Nov 21 18:01:00 crc kubenswrapper[4967]: I1121 18:01:00.256361 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-fernet-keys\") pod \"keystone-cron-29395801-5v4g6\" (UID: \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\") " pod="openstack/keystone-cron-29395801-5v4g6" Nov 21 18:01:00 crc kubenswrapper[4967]: I1121 18:01:00.256630 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-config-data\") pod \"keystone-cron-29395801-5v4g6\" (UID: \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\") " pod="openstack/keystone-cron-29395801-5v4g6" Nov 21 18:01:00 crc kubenswrapper[4967]: I1121 18:01:00.256704 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55q9l\" (UniqueName: \"kubernetes.io/projected/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-kube-api-access-55q9l\") pod \"keystone-cron-29395801-5v4g6\" (UID: \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\") " pod="openstack/keystone-cron-29395801-5v4g6" Nov 21 18:01:00 crc kubenswrapper[4967]: I1121 18:01:00.256745 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-combined-ca-bundle\") pod \"keystone-cron-29395801-5v4g6\" (UID: \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\") " pod="openstack/keystone-cron-29395801-5v4g6" Nov 21 18:01:00 crc kubenswrapper[4967]: I1121 18:01:00.359837 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-config-data\") pod \"keystone-cron-29395801-5v4g6\" (UID: \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\") " pod="openstack/keystone-cron-29395801-5v4g6" Nov 21 18:01:00 crc kubenswrapper[4967]: I1121 18:01:00.359954 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55q9l\" (UniqueName: \"kubernetes.io/projected/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-kube-api-access-55q9l\") pod \"keystone-cron-29395801-5v4g6\" (UID: \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\") " pod="openstack/keystone-cron-29395801-5v4g6" Nov 21 18:01:00 crc kubenswrapper[4967]: I1121 18:01:00.359987 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-combined-ca-bundle\") pod \"keystone-cron-29395801-5v4g6\" (UID: \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\") " pod="openstack/keystone-cron-29395801-5v4g6" Nov 21 18:01:00 crc kubenswrapper[4967]: I1121 18:01:00.360233 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-fernet-keys\") pod \"keystone-cron-29395801-5v4g6\" (UID: \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\") " pod="openstack/keystone-cron-29395801-5v4g6" Nov 21 18:01:00 crc kubenswrapper[4967]: I1121 18:01:00.369229 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-combined-ca-bundle\") pod \"keystone-cron-29395801-5v4g6\" (UID: \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\") " pod="openstack/keystone-cron-29395801-5v4g6" Nov 21 18:01:00 crc kubenswrapper[4967]: I1121 18:01:00.371289 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-config-data\") pod \"keystone-cron-29395801-5v4g6\" (UID: \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\") " pod="openstack/keystone-cron-29395801-5v4g6" Nov 21 18:01:00 crc kubenswrapper[4967]: I1121 18:01:00.371922 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-fernet-keys\") pod \"keystone-cron-29395801-5v4g6\" (UID: \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\") " pod="openstack/keystone-cron-29395801-5v4g6" Nov 21 18:01:00 crc kubenswrapper[4967]: I1121 18:01:00.378845 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55q9l\" (UniqueName: \"kubernetes.io/projected/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-kube-api-access-55q9l\") pod \"keystone-cron-29395801-5v4g6\" (UID: \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\") " pod="openstack/keystone-cron-29395801-5v4g6" Nov 21 18:01:00 crc kubenswrapper[4967]: I1121 18:01:00.530647 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29395801-5v4g6" Nov 21 18:01:00 crc kubenswrapper[4967]: I1121 18:01:00.538631 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 18:01:00 crc kubenswrapper[4967]: E1121 18:01:00.539069 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 18:01:01 crc kubenswrapper[4967]: I1121 18:01:01.064969 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29395801-5v4g6"] Nov 21 18:01:01 crc kubenswrapper[4967]: I1121 18:01:01.540925 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395801-5v4g6" event={"ID":"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98","Type":"ContainerStarted","Data":"d476b050166b5bf6b3d22d0ee3c077f469a5fd30b2553465f0deade8e17f6019"} Nov 21 18:01:01 crc kubenswrapper[4967]: I1121 18:01:01.541402 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395801-5v4g6" event={"ID":"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98","Type":"ContainerStarted","Data":"1294a641ea67a5e384efb946321743d70b6cdbf5c0eb6654228b201e1d980661"} Nov 21 18:01:01 crc kubenswrapper[4967]: I1121 18:01:01.578747 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29395801-5v4g6" podStartSLOduration=1.57872215 podStartE2EDuration="1.57872215s" podCreationTimestamp="2025-11-21 18:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 18:01:01.56648212 +0000 UTC m=+8749.825003128" 
watchObservedRunningTime="2025-11-21 18:01:01.57872215 +0000 UTC m=+8749.837243158" Nov 21 18:01:05 crc kubenswrapper[4967]: I1121 18:01:05.610193 4967 generic.go:334] "Generic (PLEG): container finished" podID="7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98" containerID="d476b050166b5bf6b3d22d0ee3c077f469a5fd30b2553465f0deade8e17f6019" exitCode=0 Nov 21 18:01:05 crc kubenswrapper[4967]: I1121 18:01:05.610383 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395801-5v4g6" event={"ID":"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98","Type":"ContainerDied","Data":"d476b050166b5bf6b3d22d0ee3c077f469a5fd30b2553465f0deade8e17f6019"} Nov 21 18:01:07 crc kubenswrapper[4967]: I1121 18:01:07.116707 4967 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29395801-5v4g6" Nov 21 18:01:07 crc kubenswrapper[4967]: I1121 18:01:07.192353 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-fernet-keys\") pod \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\" (UID: \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\") " Nov 21 18:01:07 crc kubenswrapper[4967]: I1121 18:01:07.192497 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-combined-ca-bundle\") pod \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\" (UID: \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\") " Nov 21 18:01:07 crc kubenswrapper[4967]: I1121 18:01:07.192540 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-55q9l\" (UniqueName: \"kubernetes.io/projected/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-kube-api-access-55q9l\") pod \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\" (UID: \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\") " Nov 21 18:01:07 crc kubenswrapper[4967]: I1121 18:01:07.192720 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-config-data\") pod \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\" (UID: \"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98\") " Nov 21 18:01:07 crc kubenswrapper[4967]: I1121 18:01:07.206671 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98" (UID: "7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 18:01:07 crc kubenswrapper[4967]: I1121 18:01:07.206942 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-kube-api-access-55q9l" (OuterVolumeSpecName: "kube-api-access-55q9l") pod "7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98" (UID: "7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98"). InnerVolumeSpecName "kube-api-access-55q9l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 18:01:07 crc kubenswrapper[4967]: I1121 18:01:07.251828 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98" (UID: "7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 18:01:07 crc kubenswrapper[4967]: I1121 18:01:07.273843 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-config-data" (OuterVolumeSpecName: "config-data") pod "7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98" (UID: "7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 18:01:07 crc kubenswrapper[4967]: I1121 18:01:07.296384 4967 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 21 18:01:07 crc kubenswrapper[4967]: I1121 18:01:07.296429 4967 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 18:01:07 crc kubenswrapper[4967]: I1121 18:01:07.296446 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-55q9l\" (UniqueName: \"kubernetes.io/projected/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-kube-api-access-55q9l\") on node \"crc\" DevicePath \"\"" Nov 21 18:01:07 crc kubenswrapper[4967]: I1121 18:01:07.296459 4967 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 18:01:07 crc kubenswrapper[4967]: I1121 18:01:07.645223 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395801-5v4g6" event={"ID":"7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98","Type":"ContainerDied","Data":"1294a641ea67a5e384efb946321743d70b6cdbf5c0eb6654228b201e1d980661"} Nov 21 18:01:07 crc kubenswrapper[4967]: I1121 18:01:07.645277 4967 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1294a641ea67a5e384efb946321743d70b6cdbf5c0eb6654228b201e1d980661" Nov 21 18:01:07 crc kubenswrapper[4967]: I1121 18:01:07.645381 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29395801-5v4g6" Nov 21 18:01:13 crc kubenswrapper[4967]: I1121 18:01:13.537086 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 18:01:13 crc kubenswrapper[4967]: E1121 18:01:13.538630 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 18:01:24 crc kubenswrapper[4967]: I1121 18:01:24.538224 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 18:01:24 crc kubenswrapper[4967]: E1121 18:01:24.539722 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 18:01:39 crc kubenswrapper[4967]: I1121 18:01:39.536981 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 18:01:39 crc kubenswrapper[4967]: E1121 18:01:39.538344 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 18:01:50 crc kubenswrapper[4967]: I1121 18:01:50.537503 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 18:01:50 crc kubenswrapper[4967]: E1121 18:01:50.540019 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 18:02:03 crc kubenswrapper[4967]: I1121 18:02:03.537288 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 18:02:03 crc kubenswrapper[4967]: E1121 18:02:03.539837 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 18:02:18 crc kubenswrapper[4967]: I1121 18:02:18.537467 4967 scope.go:117] "RemoveContainer" 
containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 18:02:18 crc kubenswrapper[4967]: E1121 18:02:18.538199 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 18:02:29 crc kubenswrapper[4967]: I1121 18:02:29.536984 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 18:02:29 crc kubenswrapper[4967]: E1121 18:02:29.538364 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 18:02:42 crc kubenswrapper[4967]: I1121 18:02:42.545511 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 18:02:42 crc kubenswrapper[4967]: E1121 18:02:42.548583 4967 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-lrth2_openshift-machine-config-operator(8f12a156-8db0-49be-a048-e7c4988f9cd0)\"" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" podUID="8f12a156-8db0-49be-a048-e7c4988f9cd0" Nov 21 18:02:56 crc kubenswrapper[4967]: I1121 18:02:56.537489 4967 scope.go:117] "RemoveContainer" containerID="21587a4df3de1c8e5d8aa4ee0a8712f90f5367ca24f1ab2d7a027fb519cc37b1" Nov 21 18:02:57 crc kubenswrapper[4967]: I1121 18:02:57.280938 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-lrth2" event={"ID":"8f12a156-8db0-49be-a048-e7c4988f9cd0","Type":"ContainerStarted","Data":"265b0dac333b8004f3f8cbb52621dab344744da6bf3d2e0b6261b0b940c6bb11"} Nov 21 18:03:01 crc kubenswrapper[4967]: I1121 18:03:01.718960 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4z6ws"] Nov 21 18:03:01 crc kubenswrapper[4967]: E1121 18:03:01.720285 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98" containerName="keystone-cron" Nov 21 18:03:01 crc kubenswrapper[4967]: I1121 18:03:01.720303 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98" containerName="keystone-cron" Nov 21 18:03:01 crc kubenswrapper[4967]: I1121 18:03:01.720681 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cbe83e5-7d80-42ea-aa7d-4e5c2b51cf98" containerName="keystone-cron" Nov 21 18:03:01 crc kubenswrapper[4967]: I1121 18:03:01.723173 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4z6ws" Nov 21 18:03:01 crc kubenswrapper[4967]: I1121 18:03:01.754939 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4z6ws"] Nov 21 18:03:01 crc kubenswrapper[4967]: I1121 18:03:01.847073 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f838732b-4788-406e-82ea-6ab58f89c858-utilities\") pod \"certified-operators-4z6ws\" (UID: \"f838732b-4788-406e-82ea-6ab58f89c858\") " pod="openshift-marketplace/certified-operators-4z6ws" Nov 21 18:03:01 crc kubenswrapper[4967]: I1121 18:03:01.847602 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f838732b-4788-406e-82ea-6ab58f89c858-catalog-content\") pod \"certified-operators-4z6ws\" (UID: \"f838732b-4788-406e-82ea-6ab58f89c858\") " pod="openshift-marketplace/certified-operators-4z6ws" Nov 21 18:03:01 crc kubenswrapper[4967]: I1121 18:03:01.847961 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jf5k\" (UniqueName: \"kubernetes.io/projected/f838732b-4788-406e-82ea-6ab58f89c858-kube-api-access-4jf5k\") pod \"certified-operators-4z6ws\" (UID: \"f838732b-4788-406e-82ea-6ab58f89c858\") " pod="openshift-marketplace/certified-operators-4z6ws" Nov 21 18:03:01 crc kubenswrapper[4967]: I1121 18:03:01.951105 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jf5k\" (UniqueName: \"kubernetes.io/projected/f838732b-4788-406e-82ea-6ab58f89c858-kube-api-access-4jf5k\") pod \"certified-operators-4z6ws\" (UID: \"f838732b-4788-406e-82ea-6ab58f89c858\") " pod="openshift-marketplace/certified-operators-4z6ws" Nov 21 18:03:01 crc kubenswrapper[4967]: I1121 18:03:01.951338 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f838732b-4788-406e-82ea-6ab58f89c858-utilities\") pod \"certified-operators-4z6ws\" (UID: \"f838732b-4788-406e-82ea-6ab58f89c858\") " pod="openshift-marketplace/certified-operators-4z6ws" Nov 21 18:03:01 crc kubenswrapper[4967]: I1121 18:03:01.951390 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f838732b-4788-406e-82ea-6ab58f89c858-catalog-content\") pod \"certified-operators-4z6ws\" (UID: \"f838732b-4788-406e-82ea-6ab58f89c858\") " pod="openshift-marketplace/certified-operators-4z6ws" Nov 21 18:03:01 crc kubenswrapper[4967]: I1121 18:03:01.951974 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f838732b-4788-406e-82ea-6ab58f89c858-utilities\") pod \"certified-operators-4z6ws\" (UID: \"f838732b-4788-406e-82ea-6ab58f89c858\") " pod="openshift-marketplace/certified-operators-4z6ws" Nov 21 18:03:01 crc kubenswrapper[4967]: I1121 18:03:01.952094 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f838732b-4788-406e-82ea-6ab58f89c858-catalog-content\") pod \"certified-operators-4z6ws\" (UID: \"f838732b-4788-406e-82ea-6ab58f89c858\") " pod="openshift-marketplace/certified-operators-4z6ws" Nov 21 18:03:01 crc kubenswrapper[4967]: I1121 18:03:01.976838 4967 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-4jf5k\" (UniqueName: \"kubernetes.io/projected/f838732b-4788-406e-82ea-6ab58f89c858-kube-api-access-4jf5k\") pod \"certified-operators-4z6ws\" (UID: \"f838732b-4788-406e-82ea-6ab58f89c858\") " pod="openshift-marketplace/certified-operators-4z6ws" Nov 21 18:03:02 crc kubenswrapper[4967]: I1121 18:03:02.090485 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4z6ws" Nov 21 18:03:02 crc kubenswrapper[4967]: I1121 18:03:02.675326 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4z6ws"] Nov 21 18:03:03 crc kubenswrapper[4967]: I1121 18:03:03.367798 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4z6ws" event={"ID":"f838732b-4788-406e-82ea-6ab58f89c858","Type":"ContainerStarted","Data":"cbd19ee7384d03abfa6d60cc51dc073cb8bf8a4f7b92220e8164011334b65458"} Nov 21 18:03:04 crc kubenswrapper[4967]: I1121 18:03:04.398549 4967 generic.go:334] "Generic (PLEG): container finished" podID="f838732b-4788-406e-82ea-6ab58f89c858" containerID="98ba126b23f38dcbffdff7029f9473a5033ff30d8a7773b10e21de6bd11c44cb" exitCode=0 Nov 21 18:03:04 crc kubenswrapper[4967]: I1121 18:03:04.399099 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4z6ws" event={"ID":"f838732b-4788-406e-82ea-6ab58f89c858","Type":"ContainerDied","Data":"98ba126b23f38dcbffdff7029f9473a5033ff30d8a7773b10e21de6bd11c44cb"} Nov 21 18:03:04 crc kubenswrapper[4967]: I1121 18:03:04.402085 4967 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 18:03:07 crc kubenswrapper[4967]: I1121 18:03:07.434046 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4z6ws" event={"ID":"f838732b-4788-406e-82ea-6ab58f89c858","Type":"ContainerStarted","Data":"3c04f24b718f7daaf1e94228511b56d20c8b87e86f3f74687626f55217b4d905"} Nov 21 18:03:12 crc kubenswrapper[4967]: I1121 18:03:12.509687 4967 generic.go:334] "Generic (PLEG): container finished" podID="f838732b-4788-406e-82ea-6ab58f89c858" containerID="3c04f24b718f7daaf1e94228511b56d20c8b87e86f3f74687626f55217b4d905" exitCode=0 Nov 21 18:03:12 crc kubenswrapper[4967]: I1121 18:03:12.509887 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4z6ws" event={"ID":"f838732b-4788-406e-82ea-6ab58f89c858","Type":"ContainerDied","Data":"3c04f24b718f7daaf1e94228511b56d20c8b87e86f3f74687626f55217b4d905"} Nov 21 18:03:16 crc kubenswrapper[4967]: I1121 18:03:16.573399 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4z6ws" event={"ID":"f838732b-4788-406e-82ea-6ab58f89c858","Type":"ContainerStarted","Data":"775a24bf0c7b49425fddd49d33ba8f81fb4927004c0a5105a397f1bbb87c151a"} Nov 21 18:03:16 crc kubenswrapper[4967]: I1121 18:03:16.605038 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4z6ws" podStartSLOduration=4.55318263 podStartE2EDuration="15.605017897s" podCreationTimestamp="2025-11-21 18:03:01 +0000 UTC" firstStartedPulling="2025-11-21 18:03:04.401795682 +0000 UTC m=+8872.660316690" lastFinishedPulling="2025-11-21 18:03:15.453630929 +0000 UTC m=+8883.712151957" observedRunningTime="2025-11-21 18:03:16.60127884 +0000 UTC m=+8884.859799868" watchObservedRunningTime="2025-11-21 
18:03:16.605017897 +0000 UTC m=+8884.863538905" Nov 21 18:03:22 crc kubenswrapper[4967]: I1121 18:03:22.091350 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4z6ws" Nov 21 18:03:22 crc kubenswrapper[4967]: I1121 18:03:22.092543 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4z6ws" Nov 21 18:03:23 crc kubenswrapper[4967]: I1121 18:03:23.142054 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-4z6ws" podUID="f838732b-4788-406e-82ea-6ab58f89c858" containerName="registry-server" probeResult="failure" output=< Nov 21 18:03:23 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 18:03:23 crc kubenswrapper[4967]: > Nov 21 18:03:33 crc kubenswrapper[4967]: I1121 18:03:33.154339 4967 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-4z6ws" podUID="f838732b-4788-406e-82ea-6ab58f89c858" containerName="registry-server" probeResult="failure" output=< Nov 21 18:03:33 crc kubenswrapper[4967]: timeout: failed to connect service ":50051" within 1s Nov 21 18:03:33 crc kubenswrapper[4967]: > Nov 21 18:03:42 crc kubenswrapper[4967]: I1121 18:03:42.158056 4967 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4z6ws" Nov 21 18:03:42 crc kubenswrapper[4967]: I1121 18:03:42.222407 4967 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4z6ws" Nov 21 18:03:42 crc kubenswrapper[4967]: I1121 18:03:42.402582 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4z6ws"] Nov 21 18:03:43 crc kubenswrapper[4967]: I1121 18:03:43.969486 4967 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4z6ws" podUID="f838732b-4788-406e-82ea-6ab58f89c858" containerName="registry-server" containerID="cri-o://775a24bf0c7b49425fddd49d33ba8f81fb4927004c0a5105a397f1bbb87c151a" gracePeriod=2 Nov 21 18:03:44 crc kubenswrapper[4967]: I1121 18:03:44.766064 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4z6ws" Nov 21 18:03:44 crc kubenswrapper[4967]: I1121 18:03:44.793283 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f838732b-4788-406e-82ea-6ab58f89c858-catalog-content\") pod \"f838732b-4788-406e-82ea-6ab58f89c858\" (UID: \"f838732b-4788-406e-82ea-6ab58f89c858\") " Nov 21 18:03:44 crc kubenswrapper[4967]: I1121 18:03:44.793380 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jf5k\" (UniqueName: \"kubernetes.io/projected/f838732b-4788-406e-82ea-6ab58f89c858-kube-api-access-4jf5k\") pod \"f838732b-4788-406e-82ea-6ab58f89c858\" (UID: \"f838732b-4788-406e-82ea-6ab58f89c858\") " Nov 21 18:03:44 crc kubenswrapper[4967]: I1121 18:03:44.793421 4967 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f838732b-4788-406e-82ea-6ab58f89c858-utilities\") pod \"f838732b-4788-406e-82ea-6ab58f89c858\" (UID: \"f838732b-4788-406e-82ea-6ab58f89c858\") " Nov 21 18:03:44 crc kubenswrapper[4967]: I1121 18:03:44.795047 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f838732b-4788-406e-82ea-6ab58f89c858-utilities" (OuterVolumeSpecName: "utilities") pod "f838732b-4788-406e-82ea-6ab58f89c858" (UID: "f838732b-4788-406e-82ea-6ab58f89c858"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 18:03:44 crc kubenswrapper[4967]: I1121 18:03:44.814965 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f838732b-4788-406e-82ea-6ab58f89c858-kube-api-access-4jf5k" (OuterVolumeSpecName: "kube-api-access-4jf5k") pod "f838732b-4788-406e-82ea-6ab58f89c858" (UID: "f838732b-4788-406e-82ea-6ab58f89c858"). InnerVolumeSpecName "kube-api-access-4jf5k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 18:03:44 crc kubenswrapper[4967]: I1121 18:03:44.867627 4967 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f838732b-4788-406e-82ea-6ab58f89c858-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f838732b-4788-406e-82ea-6ab58f89c858" (UID: "f838732b-4788-406e-82ea-6ab58f89c858"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 18:03:44 crc kubenswrapper[4967]: I1121 18:03:44.901743 4967 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f838732b-4788-406e-82ea-6ab58f89c858-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 18:03:44 crc kubenswrapper[4967]: I1121 18:03:44.902225 4967 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jf5k\" (UniqueName: \"kubernetes.io/projected/f838732b-4788-406e-82ea-6ab58f89c858-kube-api-access-4jf5k\") on node \"crc\" DevicePath \"\"" Nov 21 18:03:44 crc kubenswrapper[4967]: I1121 18:03:44.902665 4967 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f838732b-4788-406e-82ea-6ab58f89c858-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 18:03:44 crc kubenswrapper[4967]: I1121 18:03:44.991627 4967 generic.go:334] "Generic (PLEG): container finished" podID="f838732b-4788-406e-82ea-6ab58f89c858" containerID="775a24bf0c7b49425fddd49d33ba8f81fb4927004c0a5105a397f1bbb87c151a" exitCode=0 Nov 21 18:03:44 crc kubenswrapper[4967]: I1121 18:03:44.991688 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4z6ws" event={"ID":"f838732b-4788-406e-82ea-6ab58f89c858","Type":"ContainerDied","Data":"775a24bf0c7b49425fddd49d33ba8f81fb4927004c0a5105a397f1bbb87c151a"} Nov 21 18:03:44 crc kubenswrapper[4967]: I1121 18:03:44.991723 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4z6ws" event={"ID":"f838732b-4788-406e-82ea-6ab58f89c858","Type":"ContainerDied","Data":"cbd19ee7384d03abfa6d60cc51dc073cb8bf8a4f7b92220e8164011334b65458"} Nov 21 18:03:44 crc kubenswrapper[4967]: I1121 18:03:44.991744 4967 scope.go:117] "RemoveContainer" containerID="775a24bf0c7b49425fddd49d33ba8f81fb4927004c0a5105a397f1bbb87c151a" Nov 21 18:03:44 crc kubenswrapper[4967]: I1121 18:03:44.991931 4967 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4z6ws" Nov 21 18:03:45 crc kubenswrapper[4967]: I1121 18:03:45.045590 4967 scope.go:117] "RemoveContainer" containerID="3c04f24b718f7daaf1e94228511b56d20c8b87e86f3f74687626f55217b4d905" Nov 21 18:03:45 crc kubenswrapper[4967]: I1121 18:03:45.058750 4967 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4z6ws"] Nov 21 18:03:45 crc kubenswrapper[4967]: I1121 18:03:45.074846 4967 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4z6ws"] Nov 21 18:03:45 crc kubenswrapper[4967]: I1121 18:03:45.081401 4967 scope.go:117] "RemoveContainer" containerID="98ba126b23f38dcbffdff7029f9473a5033ff30d8a7773b10e21de6bd11c44cb" Nov 21 18:03:45 crc kubenswrapper[4967]: I1121 18:03:45.143078 4967 scope.go:117] "RemoveContainer" containerID="775a24bf0c7b49425fddd49d33ba8f81fb4927004c0a5105a397f1bbb87c151a" Nov 21 18:03:45 crc kubenswrapper[4967]: E1121 18:03:45.143884 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"775a24bf0c7b49425fddd49d33ba8f81fb4927004c0a5105a397f1bbb87c151a\": container with ID starting with 775a24bf0c7b49425fddd49d33ba8f81fb4927004c0a5105a397f1bbb87c151a not found: ID does not exist" containerID="775a24bf0c7b49425fddd49d33ba8f81fb4927004c0a5105a397f1bbb87c151a" Nov 21 18:03:45 crc kubenswrapper[4967]: I1121 18:03:45.143951 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"775a24bf0c7b49425fddd49d33ba8f81fb4927004c0a5105a397f1bbb87c151a"} err="failed to get container status \"775a24bf0c7b49425fddd49d33ba8f81fb4927004c0a5105a397f1bbb87c151a\": rpc error: code = NotFound desc = could not find container \"775a24bf0c7b49425fddd49d33ba8f81fb4927004c0a5105a397f1bbb87c151a\": container with ID starting with 775a24bf0c7b49425fddd49d33ba8f81fb4927004c0a5105a397f1bbb87c151a not found: ID does not exist" Nov 21 18:03:45 crc kubenswrapper[4967]: I1121 18:03:45.143988 4967 scope.go:117] "RemoveContainer" containerID="3c04f24b718f7daaf1e94228511b56d20c8b87e86f3f74687626f55217b4d905" Nov 21 18:03:45 crc kubenswrapper[4967]: E1121 18:03:45.144298 4967 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c04f24b718f7daaf1e94228511b56d20c8b87e86f3f74687626f55217b4d905\": container with ID starting with 3c04f24b718f7daaf1e94228511b56d20c8b87e86f3f74687626f55217b4d905 not found: ID does not exist" containerID="3c04f24b718f7daaf1e94228511b56d20c8b87e86f3f74687626f55217b4d905" Nov 21 18:03:45 crc kubenswrapper[4967]: I1121 18:03:45.144354 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c04f24b718f7daaf1e94228511b56d20c8b87e86f3f74687626f55217b4d905"} err="failed to get container status \"3c04f24b718f7daaf1e94228511b56d20c8b87e86f3f74687626f55217b4d905\": rpc error: code = NotFound desc = could not find container \"3c04f24b718f7daaf1e94228511b56d20c8b87e86f3f74687626f55217b4d905\": container with ID starting with 3c04f24b718f7daaf1e94228511b56d20c8b87e86f3f74687626f55217b4d905 not found: ID does not exist" Nov 21 18:03:45 crc kubenswrapper[4967]: I1121 18:03:45.144412 4967 scope.go:117] "RemoveContainer" containerID="98ba126b23f38dcbffdff7029f9473a5033ff30d8a7773b10e21de6bd11c44cb" Nov 21 18:03:45 crc kubenswrapper[4967]: E1121 18:03:45.144637 4967 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"98ba126b23f38dcbffdff7029f9473a5033ff30d8a7773b10e21de6bd11c44cb\": container with ID starting with 98ba126b23f38dcbffdff7029f9473a5033ff30d8a7773b10e21de6bd11c44cb not found: ID does not exist" containerID="98ba126b23f38dcbffdff7029f9473a5033ff30d8a7773b10e21de6bd11c44cb" Nov 21 18:03:45 crc kubenswrapper[4967]: I1121 18:03:45.144665 4967 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98ba126b23f38dcbffdff7029f9473a5033ff30d8a7773b10e21de6bd11c44cb"} err="failed to get container status \"98ba126b23f38dcbffdff7029f9473a5033ff30d8a7773b10e21de6bd11c44cb\": rpc error: code = NotFound desc = could not find container \"98ba126b23f38dcbffdff7029f9473a5033ff30d8a7773b10e21de6bd11c44cb\": container with ID starting with 98ba126b23f38dcbffdff7029f9473a5033ff30d8a7773b10e21de6bd11c44cb not found: ID does not exist" Nov 21 18:03:46 crc kubenswrapper[4967]: I1121 18:03:46.556542 4967 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f838732b-4788-406e-82ea-6ab58f89c858" path="/var/lib/kubelet/pods/f838732b-4788-406e-82ea-6ab58f89c858/volumes" Nov 21 18:04:39 crc kubenswrapper[4967]: I1121 18:04:39.323888 4967 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-c72cj"] Nov 21 18:04:39 crc kubenswrapper[4967]: E1121 18:04:39.325535 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f838732b-4788-406e-82ea-6ab58f89c858" containerName="extract-utilities" Nov 21 18:04:39 crc kubenswrapper[4967]: I1121 18:04:39.325551 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="f838732b-4788-406e-82ea-6ab58f89c858" containerName="extract-utilities" Nov 21 18:04:39 crc kubenswrapper[4967]: E1121 18:04:39.325576 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f838732b-4788-406e-82ea-6ab58f89c858" containerName="registry-server" Nov 21 18:04:39 crc kubenswrapper[4967]: I1121 18:04:39.325585 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="f838732b-4788-406e-82ea-6ab58f89c858" containerName="registry-server" Nov 21 18:04:39 crc kubenswrapper[4967]: E1121 18:04:39.325621 4967 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f838732b-4788-406e-82ea-6ab58f89c858" containerName="extract-content" Nov 21 18:04:39 crc kubenswrapper[4967]: I1121 18:04:39.325629 4967 state_mem.go:107] "Deleted CPUSet assignment" podUID="f838732b-4788-406e-82ea-6ab58f89c858" containerName="extract-content" Nov 21 18:04:39 crc kubenswrapper[4967]: I1121 18:04:39.325926 4967 memory_manager.go:354] "RemoveStaleState removing state" podUID="f838732b-4788-406e-82ea-6ab58f89c858" containerName="registry-server" Nov 21 18:04:39 crc kubenswrapper[4967]: I1121 18:04:39.328837 4967 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c72cj" Nov 21 18:04:39 crc kubenswrapper[4967]: I1121 18:04:39.370949 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c72cj"] Nov 21 18:04:39 crc kubenswrapper[4967]: I1121 18:04:39.374750 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b10fa2dc-144e-4237-9d21-5fe871ba4ccb-utilities\") pod \"redhat-operators-c72cj\" (UID: \"b10fa2dc-144e-4237-9d21-5fe871ba4ccb\") " pod="openshift-marketplace/redhat-operators-c72cj" Nov 21 18:04:39 crc kubenswrapper[4967]: I1121 18:04:39.375078 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b10fa2dc-144e-4237-9d21-5fe871ba4ccb-catalog-content\") pod \"redhat-operators-c72cj\" (UID: \"b10fa2dc-144e-4237-9d21-5fe871ba4ccb\") " pod="openshift-marketplace/redhat-operators-c72cj" Nov 21 18:04:39 crc kubenswrapper[4967]: I1121 18:04:39.375233 4967 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzm5d\" (UniqueName: \"kubernetes.io/projected/b10fa2dc-144e-4237-9d21-5fe871ba4ccb-kube-api-access-qzm5d\") pod \"redhat-operators-c72cj\" (UID: \"b10fa2dc-144e-4237-9d21-5fe871ba4ccb\") " pod="openshift-marketplace/redhat-operators-c72cj" Nov 21 18:04:39 crc kubenswrapper[4967]: I1121 18:04:39.478426 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b10fa2dc-144e-4237-9d21-5fe871ba4ccb-utilities\") pod \"redhat-operators-c72cj\" (UID: \"b10fa2dc-144e-4237-9d21-5fe871ba4ccb\") " pod="openshift-marketplace/redhat-operators-c72cj" Nov 21 18:04:39 crc kubenswrapper[4967]: I1121 18:04:39.478535 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b10fa2dc-144e-4237-9d21-5fe871ba4ccb-catalog-content\") pod \"redhat-operators-c72cj\" (UID: \"b10fa2dc-144e-4237-9d21-5fe871ba4ccb\") " pod="openshift-marketplace/redhat-operators-c72cj" Nov 21 18:04:39 crc kubenswrapper[4967]: I1121 18:04:39.478574 4967 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzm5d\" (UniqueName: \"kubernetes.io/projected/b10fa2dc-144e-4237-9d21-5fe871ba4ccb-kube-api-access-qzm5d\") pod \"redhat-operators-c72cj\" (UID: \"b10fa2dc-144e-4237-9d21-5fe871ba4ccb\") " pod="openshift-marketplace/redhat-operators-c72cj" Nov 21 18:04:39 crc kubenswrapper[4967]: I1121 18:04:39.479222 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b10fa2dc-144e-4237-9d21-5fe871ba4ccb-utilities\") pod \"redhat-operators-c72cj\" (UID: \"b10fa2dc-144e-4237-9d21-5fe871ba4ccb\") " pod="openshift-marketplace/redhat-operators-c72cj" Nov 21 18:04:39 crc kubenswrapper[4967]: I1121 18:04:39.479361 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b10fa2dc-144e-4237-9d21-5fe871ba4ccb-catalog-content\") pod \"redhat-operators-c72cj\" (UID: \"b10fa2dc-144e-4237-9d21-5fe871ba4ccb\") " pod="openshift-marketplace/redhat-operators-c72cj" Nov 21 18:04:39 crc kubenswrapper[4967]: I1121 18:04:39.516861 4967 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-qzm5d\" (UniqueName: \"kubernetes.io/projected/b10fa2dc-144e-4237-9d21-5fe871ba4ccb-kube-api-access-qzm5d\") pod \"redhat-operators-c72cj\" (UID: \"b10fa2dc-144e-4237-9d21-5fe871ba4ccb\") " pod="openshift-marketplace/redhat-operators-c72cj" Nov 21 18:04:39 crc kubenswrapper[4967]: I1121 18:04:39.689736 4967 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c72cj" Nov 21 18:04:40 crc kubenswrapper[4967]: I1121 18:04:40.403268 4967 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c72cj"] Nov 21 18:04:40 crc kubenswrapper[4967]: I1121 18:04:40.886400 4967 generic.go:334] "Generic (PLEG): container finished" podID="b10fa2dc-144e-4237-9d21-5fe871ba4ccb" containerID="012aa4bd1f819388f64d26350d24d9355f9d9381b6159f3bf6275958ceae129b" exitCode=0 Nov 21 18:04:40 crc kubenswrapper[4967]: I1121 18:04:40.886461 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c72cj" event={"ID":"b10fa2dc-144e-4237-9d21-5fe871ba4ccb","Type":"ContainerDied","Data":"012aa4bd1f819388f64d26350d24d9355f9d9381b6159f3bf6275958ceae129b"} Nov 21 18:04:40 crc kubenswrapper[4967]: I1121 18:04:40.886809 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c72cj" event={"ID":"b10fa2dc-144e-4237-9d21-5fe871ba4ccb","Type":"ContainerStarted","Data":"43d710a386bd4afe580f3d2d0a6b767f2f3fc80d2cea7c99b37b542970f8a732"} Nov 21 18:04:43 crc kubenswrapper[4967]: I1121 18:04:43.944801 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c72cj" event={"ID":"b10fa2dc-144e-4237-9d21-5fe871ba4ccb","Type":"ContainerStarted","Data":"06b73abff728334a5b71b18371cfb0af07aa78eb23a424c686b94c2ae1a9871d"} Nov 21 18:04:49 crc kubenswrapper[4967]: I1121 18:04:49.022869 4967 generic.go:334] "Generic (PLEG): container finished" podID="b10fa2dc-144e-4237-9d21-5fe871ba4ccb" containerID="06b73abff728334a5b71b18371cfb0af07aa78eb23a424c686b94c2ae1a9871d" exitCode=0 Nov 21 18:04:49 crc kubenswrapper[4967]: I1121 18:04:49.023025 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c72cj" event={"ID":"b10fa2dc-144e-4237-9d21-5fe871ba4ccb","Type":"ContainerDied","Data":"06b73abff728334a5b71b18371cfb0af07aa78eb23a424c686b94c2ae1a9871d"} Nov 21 18:04:50 crc kubenswrapper[4967]: I1121 18:04:50.038370 4967 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c72cj" event={"ID":"b10fa2dc-144e-4237-9d21-5fe871ba4ccb","Type":"ContainerStarted","Data":"e712702150c652e0005235b4136b61d120e5a422c346607d382f35dbf06674ed"} Nov 21 18:04:50 crc kubenswrapper[4967]: I1121 18:04:50.059647 4967 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-c72cj" podStartSLOduration=2.34998385 podStartE2EDuration="11.059620063s" podCreationTimestamp="2025-11-21 18:04:39 +0000 UTC" firstStartedPulling="2025-11-21 18:04:40.891457318 +0000 UTC m=+8969.149978326" lastFinishedPulling="2025-11-21 18:04:49.601093541 +0000 UTC m=+8977.859614539" observedRunningTime="2025-11-21 18:04:50.057088011 +0000 UTC m=+8978.315609019" watchObservedRunningTime="2025-11-21 18:04:50.059620063 +0000 UTC m=+8978.318141071" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515110124722024440 0ustar coreroot‹íÁ  ÷Om7 
€7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015110124723017356 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015110102733016476 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015110102733015446 5ustar corecore